remove namespace for dist attr and process mesh (#56449)
LiYuRio authored Aug 21, 2023
1 parent 95c4bb4 commit 1f94081
Showing 18 changed files with 39 additions and 56 deletions.
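
The hunks below apply one mechanical change across the 18 files: TensorDistAttr and ProcessMesh are lifted out of the nested phi::distributed::auto_parallel namespace and now live directly in phi::distributed; the generated protobuf types and a few helpers keep the old qualification. As a hedged illustration of what this means for callers (not part of the commit; the header paths are the ones already included by the touched files):

// Illustrative sketch only; not taken from this diff.
#include <memory>

#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/process_mesh.h"

// Before this commit a default dist attr was created as
//   std::make_shared<phi::distributed::auto_parallel::TensorDistAttr>();
// After it, one namespace level disappears:
std::shared_ptr<phi::distributed::TensorDistAttr> MakeDefaultDistAttr() {
  return std::make_shared<phi::distributed::TensorDistAttr>();
}

// ProcessMesh moves the same way; a declaration is enough to show the new spelling.
void UseMesh(const phi::distributed::ProcessMesh &mesh);
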
6 changes: 4 additions & 2 deletions paddle/fluid/distributed/auto_parallel/dist_attr.h
@@ -40,6 +40,10 @@ class VarDesc;
} // namespace framework

namespace distributed {

using phi::distributed::ProcessMesh;
using phi::distributed::TensorDistAttr;

namespace auto_parallel {

using framework::BlockDesc;
@@ -48,8 +52,6 @@ using framework::ProgramDesc;
using framework::VarDesc;

using phi::distributed::auto_parallel::OperatorDistAttrProto;
using phi::distributed::auto_parallel::ProcessMesh;
using phi::distributed::auto_parallel::TensorDistAttr;

constexpr const char* kDefault = "default";

@@ -19,10 +19,10 @@ limitations under the License. */

namespace paddle {
namespace distributed {
namespace auto_parallel {
using phi::distributed::ProcessMesh;
using phi::distributed::TensorDistAttr;

using phi::distributed::auto_parallel::ProcessMesh;
using phi::distributed::auto_parallel::TensorDistAttr;
namespace auto_parallel {

/**
* A unified data class for inferring distributed attributes
3 changes: 1 addition & 2 deletions paddle/fluid/eager/grad_tensor_holder.cc
@@ -99,8 +99,7 @@ void GradTensorHolder::CopyValueFromTensor(size_t slot_id,
auto dist_tensor = std::make_shared<phi::distributed::DistTensor>(
dense_temp,
dense_temp->meta(),
std::make_shared<
phi::distributed::auto_parallel::TensorDistAttr>());
std::make_shared<phi::distributed::TensorDistAttr>());
temp.set_impl(dist_tensor);
buffer_[slot_id][rank] = temp;
#endif
2 changes: 1 addition & 1 deletion paddle/fluid/framework/var_desc.h
@@ -28,7 +28,7 @@ limitations under the License. */
namespace paddle {
namespace framework {

using phi::distributed::auto_parallel::TensorDistAttr;
using phi::distributed::TensorDistAttr;

// convert between std::vector and protobuf repeated.
template <typename T>
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/auto_parallel_py.cc
@@ -40,21 +40,21 @@ namespace paddle {
namespace pybind {

using paddle::distributed::auto_parallel::DistTensorSpec;
using paddle::distributed::auto_parallel::kDefault;
using paddle::distributed::auto_parallel::OperatorDistAttr;
using paddle::distributed::auto_parallel::SPMDRuleBase;
using paddle::distributed::auto_parallel::SPMDRuleMap;
using paddle::framework::OpDesc;
using paddle::framework::VarDesc;
using phi::distributed::ProcessMesh;
using phi::distributed::TensorDistAttr;
using phi::distributed::auto_parallel::Device;
using phi::distributed::auto_parallel::DeviceCapability;
using phi::distributed::auto_parallel::DeviceMesh;
using phi::distributed::auto_parallel::DistributedMapper;
using phi::distributed::auto_parallel::kDefault;
using phi::distributed::auto_parallel::Link;
using phi::distributed::auto_parallel::LinkCapability;
using phi::distributed::auto_parallel::Machine;
using phi::distributed::auto_parallel::ProcessMesh;
using phi::distributed::auto_parallel::TensorDistAttr;

PyTypeObject *g_tensor_dist_attr_pytype = nullptr;

6 changes: 3 additions & 3 deletions paddle/fluid/pybind/eager.cc
@@ -46,7 +46,7 @@ limitations under the License. */
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
using phi::distributed::DistTensor;
using phi::distributed::auto_parallel::TensorDistAttr;
using phi::distributed::TensorDistAttr;
#endif

namespace paddle {
@@ -737,7 +737,7 @@ Tensor is the basic data structure in PaddlePaddle. There are some ways to creat
* ** zero_copy: bool,
* ** name: std::string,
* ** stop_gradient: bool,
* ** dist_attr: phi::distributed::auto_parallel::TensorDistAttr)
* ** dist_attr: phi::distributed::TensorDistAttr)
* 4.
* def __init__ (
* ** value: ndarray)
@@ -751,7 +751,7 @@ Tensor is the basic data structure in PaddlePaddle. There are some ways to creat
* ** tensor: Tensor,
* ** place: paddle::platform::Place,
* ** name: std::string,
* ** dist_attr: phi::distributed::auto_parallel::TensorDistAttr)
* ** dist_attr: phi::distributed::TensorDistAttr)
* 7. (multi-place) (should have at least one parameter, one parameter similar
* to case 5, zero parameter equals to case 1.)
* def __init__ (
5 changes: 2 additions & 3 deletions paddle/fluid/pybind/eager_utils.cc
@@ -546,7 +546,7 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
}

#ifdef PADDLE_WITH_DISTRIBUTE
using phi::distributed::auto_parallel::TensorDistAttr;
using phi::distributed::TensorDistAttr;
std::shared_ptr<TensorDistAttr> CastPyArg2DistAttr(PyObject* obj,
ssize_t arg_pos) {
if (PyObject_IsInstance(
@@ -891,8 +891,7 @@ PyObject* ToPyObject(const phi::distributed::DistTensor* value) {
return obj.ptr();
}

PyObject* ToPyObject(
const phi::distributed::auto_parallel::TensorDistAttr* value) {
PyObject* ToPyObject(const phi::distributed::TensorDistAttr* value) {
auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
obj.inc_ref();
return obj.ptr();
7 changes: 3 additions & 4 deletions paddle/fluid/pybind/eager_utils.h
@@ -121,8 +121,7 @@ PyObject* ToPyObject(const platform::Place& value);
PyObject* ToPyObject(const phi::DenseTensor* value);
#ifdef PADDLE_WITH_DISTRIBUTE
PyObject* ToPyObject(const phi::distributed::DistTensor* value);
PyObject* ToPyObject(
const phi::distributed::auto_parallel::TensorDistAttr* value);
PyObject* ToPyObject(const phi::distributed::TensorDistAttr* value);
#endif
PyObject* ToPyObject(const phi::SelectedRows* value);
PyObject* ToPyObject(const paddle::framework::proto::VarType::Type& dtype);
@@ -314,8 +313,8 @@ paddle::DataType CastPyArg2DataTypeDirectly(PyObject* obj,
ssize_t arg_pos);

#ifdef PADDLE_WITH_DISTRIBUTE
std::shared_ptr<phi::distributed::auto_parallel::TensorDistAttr>
CastPyArg2DistAttr(PyObject* obj, ssize_t arg_pos);
std::shared_ptr<phi::distributed::TensorDistAttr> CastPyArg2DistAttr(
PyObject* obj, ssize_t arg_pos);
#endif

paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/protobuf.cc
@@ -44,8 +44,8 @@ PyTypeObject *g_blockdesc_pytype = nullptr;
namespace pd = paddle::framework;
namespace jit = paddle::jit;

using paddle::distributed::TensorDistAttr;
using paddle::distributed::auto_parallel::OperatorDistAttr;
using paddle::distributed::auto_parallel::TensorDistAttr;

template <typename T>
static pybind11::bytes SerializeMessage(
3 changes: 1 addition & 2 deletions paddle/phi/api/lib/api_gen_utils.cc
@@ -541,8 +541,7 @@ phi::distributed::DistTensor* SetKernelDistOutput(Tensor* out) {
if (out->impl() == nullptr) {
auto dense_t = std::make_shared<phi::DenseTensor>();
// TODO(chenweihang): polish code, dist_attr is null now
auto dist_attr =
std::make_shared<phi::distributed::auto_parallel::TensorDistAttr>();
auto dist_attr = std::make_shared<phi::distributed::TensorDistAttr>();
auto dist_t = std::make_shared<phi::distributed::DistTensor>(
dense_t, phi::DenseTensorMeta(), dist_attr);
out->set_impl(dist_t);
4 changes: 2 additions & 2 deletions paddle/phi/core/distributed/auto_parallel/dist_attr.cc
@@ -22,7 +22,8 @@ limitations under the License. */

namespace phi {
namespace distributed {
namespace auto_parallel {
using phi::distributed::auto_parallel::str_join;
using phi::distributed::auto_parallel::TensorDistAttrProto;

// partial is not allow annotated by user by now.
std::vector<std::string> TensorDistAttr::fields_{
@@ -343,6 +344,5 @@ std::string TensorDistAttr::partial_status_string() const {
return partial_status_str;
}

} // namespace auto_parallel
} // namespace distributed
} // namespace phi
8 changes: 2 additions & 6 deletions paddle/phi/core/distributed/auto_parallel/dist_attr.h
@@ -30,9 +30,6 @@ limitations under the License. */

namespace phi {
namespace distributed {
namespace auto_parallel {

constexpr const char* kDefault = "default";

class TensorDistAttr {
public:
@@ -125,9 +122,9 @@ class TensorDistAttr {
// in partial-support-stage-I partial will always be a runtime attribute,
// there is not need to serialize it. support the partial serialization in
// future partial-support-stage-II.
void from_proto(const TensorDistAttrProto& proto);
void from_proto(const auto_parallel::TensorDistAttrProto& proto);

TensorDistAttrProto to_proto() const;
auto_parallel::TensorDistAttrProto to_proto() const;

std::string serialize_to_string();

@@ -157,6 +154,5 @@ inline bool operator!=(const TensorDistAttr& lhs, const TensorDistAttr& rhs) {
return !operator==(lhs, rhs);
}

} // namespace auto_parallel
} // namespace distributed
} // namespace phi
4 changes: 0 additions & 4 deletions paddle/phi/core/distributed/auto_parallel/dist_tensor.h
@@ -22,11 +22,7 @@ namespace phi {
class DenseTensorUtils;

namespace distributed {

namespace auto_parallel {
class TensorDistAttr;
}
using auto_parallel::TensorDistAttr;

class DistTensor final
: public phi::TensorBase,
9 changes: 6 additions & 3 deletions paddle/phi/core/distributed/auto_parallel/process_mesh.cc
@@ -12,15 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/core/distributed/auto_parallel/process_mesh.h"

#include <algorithm>
#include <iterator>

#include "paddle/phi/core/distributed/auto_parallel/process_mesh.h"
#include "paddle/phi/core/distributed/auto_parallel/utils.h"

namespace phi {
namespace distributed {
namespace auto_parallel {

using phi::distributed::auto_parallel::has_duplicates;
using phi::distributed::auto_parallel::ProcessMeshProto;
using phi::distributed::auto_parallel::str_join;

ProcessMesh::ProcessMesh(const std::vector<int64_t> &shape,
const std::vector<int64_t> &process_ids,
@@ -129,6 +133,5 @@ bool operator==(const ProcessMesh &lhs, const ProcessMesh &rhs) {
return true;
}

} // namespace auto_parallel
} // namespace distributed
} // namespace phi
8 changes: 3 additions & 5 deletions paddle/phi/core/distributed/auto_parallel/process_mesh.h
@@ -27,7 +27,6 @@ limitations under the License. */

namespace phi {
namespace distributed {
namespace auto_parallel {

class ProcessMesh {
public:
@@ -48,7 +47,7 @@ class ProcessMesh {
int64_t ndim() const { return shape_.size(); }

int64_t dim_size(int64_t dim) const {
int64_t cdim = canonical_dim(dim, shape_.size());
int64_t cdim = auto_parallel::canonical_dim(dim, shape_.size());
return shape_[cdim];
}

@@ -68,8 +67,8 @@ class ProcessMesh {
// ProcessMesh from_string(const std::string& mesh_str);
std::string to_string() const;

static ProcessMesh from_proto(const ProcessMeshProto& proto);
ProcessMeshProto to_proto() const;
static ProcessMesh from_proto(const auto_parallel::ProcessMeshProto& proto);
auto_parallel::ProcessMeshProto to_proto() const;

private:
std::vector<int64_t> shape_;
@@ -88,6 +87,5 @@ inline bool operator!=(const ProcessMesh& lhs, const ProcessMesh& rhs) {
return !operator==(lhs, rhs);
}

} // namespace auto_parallel
} // namespace distributed
} // namespace phi
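
Only the classes move; the protobuf types (auto_parallel::TensorDistAttrProto, auto_parallel::ProcessMeshProto) and helpers such as str_join stay in the auto_parallel sub-namespace, which is why the from_proto/to_proto interfaces above now spell that qualification explicitly. A minimal sketch of the resulting call pattern, assuming the proto definitions are pulled in through process_mesh.h as before (illustrative, not from the diff):

#include "paddle/phi/core/distributed/auto_parallel/process_mesh.h"

namespace phi {
namespace distributed {

// Round-trips a mesh through its proto form; the proto type keeps the
// auto_parallel qualification while ProcessMesh itself no longer does.
inline ProcessMesh RoundTripThroughProto(const ProcessMesh &mesh) {
  auto_parallel::ProcessMeshProto proto = mesh.to_proto();
  return ProcessMesh::from_proto(proto);
}

}  // namespace distributed
}  // namespace phi
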
5 changes: 1 addition & 4 deletions paddle/phi/core/distributed/auto_parallel/reshard_function.h
@@ -19,12 +19,9 @@ namespace phi {
class DeviceContext;

namespace distributed {
namespace auto_parallel {
class TensorDistAttr;
} // namespace auto_parallel

class DistTensor;
using auto_parallel::TensorDistAttr;
class TensorDistAttr;

class ReshardFunction {
public:
6 changes: 0 additions & 6 deletions paddle/phi/core/distributed/auto_parallel/reshard_utils.h
@@ -27,13 +27,7 @@ class DeviceContext;

namespace distributed {
class CommContext;

namespace auto_parallel {

class ProcessMesh;
} // namespace auto_parallel

using auto_parallel::ProcessMesh;

bool IsDimsMappingShard(const std::vector<int64_t>& dims_mapping);

5 changes: 3 additions & 2 deletions test/cpp/auto_parallel/dist_attr_test.cc
@@ -17,11 +17,11 @@ limitations under the License. */
#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/fluid/distributed/auto_parallel/dist_attr.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/var_desc.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"

namespace phi {
namespace distributed {
@@ -127,7 +127,8 @@ TEST(DistAttr, ctor) {
EXPECT_EQ(out_dist_attr.verify(get_tensor_shape(out)), true);

OperatorDistAttr mul_dist_attr(*op);
EXPECT_EQ(mul_dist_attr.impl_type(), kDefault);
EXPECT_EQ(mul_dist_attr.impl_type(),
paddle::distributed::auto_parallel::kDefault);
EXPECT_EQ(mul_dist_attr.impl_idx(), 0);
EXPECT_EQ(mul_dist_attr.is_recompute(), false);
EXPECT_EQ(mul_dist_attr.is_annotated("process_mesh"), false);