[clang-tidy] enable modernize-make-unique #55506

Merged · 1 commit · Jul 21, 2023
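For context: modernize-make-unique is the clang-tidy check that rewrites `unique_ptr` resets of the form `ptr.reset(new T(args))` into `ptr = std::make_unique<T>(args)`, which is exactly what every hunk below does. A minimal illustration of the transformation (`Widget` is an invented type, not part of this PR):

#include <memory>

struct Widget {
  explicit Widget(int size) : size_(size) {}
  int size_;
};

int main() {
  std::unique_ptr<Widget> w;
  w.reset(new Widget(42));           // flagged by modernize-make-unique
  w = std::make_unique<Widget>(42);  // the replacement the check suggests
  return 0;
}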
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -171,7 +171,7 @@ Checks: '
-modernize-deprecated-ios-base-aliases,
-modernize-loop-convert,
-modernize-make-shared,
--modernize-make-unique,
+modernize-make-unique,
-modernize-pass-by-value,
-modernize-raw-string-literal,
-modernize-redundant-void-arg,
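Dropping the leading `-` flips the check from disabled to enabled. Beyond brevity, `make_unique` closes a leak window that existed before C++17 when a raw `new` sits in a function-call argument list next to another argument that can throw. A hedged sketch of that rationale (`Process` and `MayThrow` are hypothetical):

#include <memory>

int MayThrow();
void Process(std::unique_ptr<int> p, int x);

void Caller() {
  // Pre-C++17, the compiler may evaluate `new int(1)`, then MayThrow(), then
  // the unique_ptr constructor; if MayThrow() throws, the int leaks.
  Process(std::unique_ptr<int>(new int(1)), MayThrow());
  // make_unique allocates and takes ownership in a single call, so there is
  // no window in which the allocation lacks an owner.
  Process(std::make_unique<int>(1), MayThrow());
}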
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/op_compat_sensible_pass.cc
@@ -294,7 +294,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {

OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
std::string name = op_compat.Name();
-op_compat_judgers_[name].reset(new OpCompat(std::move(op_compat)));
+op_compat_judgers_[name] = std::make_unique<OpCompat>(std::move(op_compat));
return *(op_compat_judgers_[name]);
}

4 changes: 2 additions & 2 deletions paddle/fluid/framework/op_desc.cc
@@ -457,7 +457,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) {
// The record of original_id_ is only for auto parallel.
original_id_ = op_desc.original_id_;
if (op_desc.dist_attr_) {
-dist_attr_.reset(new OperatorDistAttr(*op_desc.dist_attr_));
+dist_attr_ = std::make_unique<OperatorDistAttr>(*op_desc.dist_attr_);
}
need_update_ = true;
}
@@ -1145,7 +1145,7 @@ OperatorDistAttr *OpDesc::MutableDistAttr() {
if (dist_attr_) {
return dist_attr_.get();
} else {
-dist_attr_.reset(new OperatorDistAttr(*this));
+dist_attr_ = std::make_unique<OperatorDistAttr>(*this);
return dist_attr_.get();
}
}
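Both op_desc.cc hunks (and the matching var_desc.cc hunks below) are the same idiom: a `unique_ptr` member that is deep-copied when the source object has one and lazily constructed on first mutable access. Condensed into a sketch with an invented `Attr` type:

#include <memory>

struct Attr { int value = 0; };

class Desc {
 public:
  Desc() = default;
  Desc(const Desc& other) {
    if (other.attr_) {
      attr_ = std::make_unique<Attr>(*other.attr_);  // deep copy, never shared
    }
  }
  Attr* MutableAttr() {
    if (!attr_) {
      attr_ = std::make_unique<Attr>();  // lazy construction on first access
    }
    return attr_.get();
  }

 private:
  std::unique_ptr<Attr> attr_;
};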
4 changes: 2 additions & 2 deletions paddle/fluid/framework/var_desc.cc
@@ -27,7 +27,7 @@ VarDesc::VarDesc(const VarDesc &other)
attrs_(other.attrs_),
original_id_(other.original_id_) {
if (other.dist_attr_) {
-dist_attr_.reset(new TensorDistAttr(*other.dist_attr_));
+dist_attr_ = std::make_unique<TensorDistAttr>(*other.dist_attr_);
}
need_updated_ = true;
}
@@ -442,7 +442,7 @@ TensorDistAttr *VarDesc::MutableDistAttr() {
return dist_attr_.get();
} else {
auto shape = paddle::distributed::auto_parallel::get_tensor_shape(this);
-dist_attr_.reset(new TensorDistAttr(shape));
+dist_attr_ = std::make_unique<TensorDistAttr>(shape);
return dist_attr_.get();
}
need_updated_ = true;
62 changes: 31 additions & 31 deletions paddle/fluid/inference/api/analysis_config.cc
@@ -53,18 +53,18 @@ PassStrategy *AnalysisConfig::pass_builder() const {
if (!pass_builder_.get()) {
if (use_gpu_) {
LOG(INFO) << "Create GPU IR passes";
-pass_builder_.reset(new GpuPassStrategy);
+pass_builder_ = std::make_unique<GpuPassStrategy>();
} else if (use_xpu_) {
-pass_builder_.reset(new XpuPassStrategy);
+pass_builder_ = std::make_unique<XpuPassStrategy>();
} else if (use_ipu_) {
LOG(INFO) << "Create IPU IR passes";
-pass_builder_.reset(new IpuPassStrategy);
+pass_builder_ = std::make_unique<IpuPassStrategy>();
} else if (use_custom_device_) {
LOG(INFO) << "Create CUSTOM DEVICE IR passes";
-pass_builder_.reset(new CustomDevicePassStrategy);
+pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
} else {
LOG(INFO) << "Create CPU IR passes";
-pass_builder_.reset(new CpuPassStrategy);
+pass_builder_ = std::make_unique<CpuPassStrategy>();
}
} else if (pass_builder_->use_gpu() ^ use_gpu()) {
LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
@@ -577,20 +577,20 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
-pass_builder_.reset(new GpuPassStrategy(
-*static_cast<GpuPassStrategy *>(other.pass_builder())));
+pass_builder_ = std::make_unique<GpuPassStrategy>(
+*static_cast<GpuPassStrategy *>(other.pass_builder()));
} else if (use_ipu_) {
-pass_builder_.reset(new IpuPassStrategy(
-*static_cast<IpuPassStrategy *>(other.pass_builder())));
+pass_builder_ = std::make_unique<IpuPassStrategy>(
+*static_cast<IpuPassStrategy *>(other.pass_builder()));
} else if (use_xpu_) {
-pass_builder_.reset(new XpuPassStrategy(
-*static_cast<XpuPassStrategy *>(other.pass_builder())));
+pass_builder_ = std::make_unique<XpuPassStrategy>(
+*static_cast<XpuPassStrategy *>(other.pass_builder()));
} else if (use_custom_device_) {
-pass_builder_.reset(new CustomDevicePassStrategy(
-*static_cast<CustomDevicePassStrategy *>(other.pass_builder())));
+pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+*static_cast<CustomDevicePassStrategy *>(other.pass_builder()));
} else {
-pass_builder_.reset(new CpuPassStrategy(
-*static_cast<CpuPassStrategy *>(other.pass_builder())));
+pass_builder_ = std::make_unique<CpuPassStrategy>(
+*static_cast<CpuPassStrategy *>(other.pass_builder()));
}

#undef CP_MEMBER
@@ -663,7 +663,7 @@ void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
void AnalysisConfig::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
if (!mkldnn_quantizer_config_)
-mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
+mkldnn_quantizer_config_ = std::make_unique<MkldnnQuantizerConfig>();
use_mkldnn_quantizer_ = true;
#else
LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
@@ -850,54 +850,54 @@ void AnalysisConfig::Update() {
((use_ipu() ^ pass_builder_->use_ipu())) ||
((use_custom_device() ^ pass_builder_->use_custom_device()))) {
if (use_gpu()) {
-pass_builder_.reset(new GpuPassStrategy);
+pass_builder_ = std::make_unique<GpuPassStrategy>();
} else if (use_ipu()) {
-pass_builder_.reset(new IpuPassStrategy);
+pass_builder_ = std::make_unique<IpuPassStrategy>();
} else if (use_xpu()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
-pass_builder_.reset(new XpuPassStrategy);
+pass_builder_ = std::make_unique<XpuPassStrategy>();
} else if (use_custom_device()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between GPU and CustomDevice."));
-pass_builder_.reset(new CustomDevicePassStrategy);
+pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
} else {
-pass_builder_.reset(new CpuPassStrategy);
+pass_builder_ = std::make_unique<CpuPassStrategy>();
}

} else {
if (use_gpu()) {
-pass_builder_.reset(new GpuPassStrategy(
-*static_cast<GpuPassStrategy *>(pass_builder_.get())));
+pass_builder_ = std::make_unique<GpuPassStrategy>(
+*static_cast<GpuPassStrategy *>(pass_builder_.get()));
} else if (use_ipu()) {
VLOG(1) << "IpuPassStrategy has been used.";
-pass_builder_.reset(new IpuPassStrategy(
-*static_cast<IpuPassStrategy *>(pass_builder_.get())));
+pass_builder_ = std::make_unique<IpuPassStrategy>(
+*static_cast<IpuPassStrategy *>(pass_builder_.get()));
} else if (use_xpu()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
-pass_builder_.reset(new XpuPassStrategy(
-*static_cast<XpuPassStrategy *>(pass_builder_.get())));
+pass_builder_ = std::make_unique<XpuPassStrategy>(
+*static_cast<XpuPassStrategy *>(pass_builder_.get()));
} else if (use_custom_device()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between GPU and CustomDevice."));
-pass_builder_.reset(new CustomDevicePassStrategy(
-*static_cast<CustomDevicePassStrategy *>(pass_builder_.get())));
+pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+*static_cast<CustomDevicePassStrategy *>(pass_builder_.get()));
} else {
-pass_builder_.reset(new CpuPassStrategy(
-*static_cast<CpuPassStrategy *>(pass_builder_.get())));
+pass_builder_ = std::make_unique<CpuPassStrategy>(
+*static_cast<CpuPassStrategy *>(pass_builder_.get()));
}
}

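Every branch in analysis_config.cc has the same shape: copy-construct a concrete pass strategy from a base `PassStrategy` pointer that the surrounding `use_gpu()`/`use_xpu()`/... guard promises holds that concrete type. Reduced to its essentials (`Base` and `Derived` are placeholders):

#include <memory>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

// Precondition: *src is really a Derived; the caller's branch condition plays
// the role that use_gpu() etc. play around each static_cast in AnalysisConfig.
std::unique_ptr<Base> CloneAsDerived(const Base* src) {
  return std::make_unique<Derived>(*static_cast<const Derived*>(src));
}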
4 changes: 2 additions & 2 deletions paddle/fluid/memory/allocation/allocator_facade.cc
@@ -1454,8 +1454,8 @@ void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
auto& allocator = cuda_graph_map_[id];
auto& ref_cnt = cuda_graph_ref_cnt_[id];
if (allocator.get() == nullptr) {
-allocator.reset(
-new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
+allocator = std::make_unique<AllocatorFacadePrivate>(
+/*allow_free_idle_chunk=*/false);
VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
} else {
VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
2 changes: 1 addition & 1 deletion paddle/fluid/memory/allocation/buffered_allocator.cc
@@ -25,7 +25,7 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
platform::errors::InvalidArgument(
"Underlying allocator of BufferedAllocator is NULL"));
if (underlying_allocator_->IsAllocThreadSafe()) {
-mtx_.reset(new std::mutex());
+mtx_ = std::make_unique<std::mutex>();
}
}

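This file, and gpu_info.cc further down, share another idiom: a `std::unique_ptr<std::mutex>` that is only allocated when locking is actually required, so single-threaded use pays nothing. A reduced sketch (the class is invented):

#include <memory>
#include <mutex>

class MaybeLocked {
 public:
  explicit MaybeLocked(bool thread_safe) {
    if (thread_safe) mtx_ = std::make_unique<std::mutex>();
  }
  void Touch() {
    std::unique_lock<std::mutex> guard;  // holds no lock by default
    if (mtx_) guard = std::unique_lock<std::mutex>(*mtx_);
    // ... critical section runs locked only when a mutex exists ...
  }

 private:
  std::unique_ptr<std::mutex> mtx_;
};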
4 changes: 2 additions & 2 deletions paddle/fluid/memory/allocation/thread_local_allocator.cc
@@ -21,11 +21,11 @@ namespace allocation {
ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
: place_(p) {
if (platform::is_gpu_place(place_)) {
-buddy_allocator_.reset(new memory::detail::BuddyAllocator(
+buddy_allocator_ = std::make_unique<memory::detail::BuddyAllocator>(
std::unique_ptr<memory::detail::SystemAllocator>(
new memory::detail::GPUAllocator(place_.device)),
platform::GpuMinChunkSize(),
-platform::GpuMaxChunkSize()));
+platform::GpuMaxChunkSize());
} else {
PADDLE_THROW(platform::errors::Unavailable(
"Thread local allocator only supports CUDAPlace now."));
2 changes: 1 addition & 1 deletion paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -135,7 +135,7 @@ class RecordedGpuMallocHelper {
explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0)
: dev_id_(dev_id), limit_size_(limit_size) {
if (NeedRecord()) {
-mtx_.reset(new std::mutex());
+mtx_ = std::make_unique<std::mutex>();
}

if (FLAGS_enable_gpu_memory_usage_log) {
4 changes: 2 additions & 2 deletions paddle/phi/backends/gpu/gpu_context.cc
@@ -1048,11 +1048,11 @@ void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
GPUPinnedContext::GPUPinnedContext() {
-eigen_device_.reset(new Eigen::DefaultDevice());
+eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
}

GPUPinnedContext::GPUPinnedContext(GPUPinnedPlace place) : place_(place) {
-eigen_device_.reset(new Eigen::DefaultDevice());
+eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
}

Eigen::DefaultDevice* GPUPinnedContext::eigen_device() const {
2 changes: 1 addition & 1 deletion paddle/phi/backends/stream.cc
@@ -60,7 +60,7 @@ bool Stream::Init(const Place& place,
phi::DeviceManager::SetDevice(place_);
device_->CreateStream(this, priority, flag);

-callback_manager_.reset(new CallbackManager(this));
+callback_manager_ = std::make_unique<CallbackManager>(this);
VLOG(3) << "Init Stream: " << stream_ << ", place: " << place_
<< ", priority: " << static_cast<int>(priority)
<< ", flag:" << static_cast<int>(flag);
9 changes: 5 additions & 4 deletions paddle/phi/core/threadpool.cc
@@ -47,15 +47,16 @@ void ThreadPool::Init() {
num_threads,
0,
phi::errors::InvalidArgument("The number of threads is 0."));
-threadpool_.reset(new ThreadPool(num_threads));
+threadpool_ = std::make_unique<ThreadPool>(num_threads);
}
}

ThreadPool::ThreadPool(int num_threads) : running_(true) {
threads_.resize(num_threads);
for (auto& thread : threads_) {
// TODO(Yancey1989): binding the thread on the specify CPU number
-thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
+thread =
+std::make_unique<std::thread>(std::bind(&ThreadPool::TaskLoop, this));
}
}

@@ -111,7 +112,7 @@ ThreadPool* ThreadPoolIO::GetInstanceIO() {
void ThreadPoolIO::InitIO() {
if (io_threadpool_.get() == nullptr) {
// TODO(typhoonzero1986): make this configurable
-io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
+io_threadpool_ = std::make_unique<ThreadPool>(FLAGS_io_threadpool_size);
}
}
} // namespace phi
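One last note on the threadpool.cc hunks: `make_unique` forwards the existing `std::bind` expression unchanged, but a capturing lambda is the more idiomatic modern spelling. A sketch, not part of this PR (`Pool` is an invented stand-in for `ThreadPool`):

#include <memory>
#include <thread>

class Pool {
 public:
  void Start() {
    // Equivalent to std::bind(&Pool::TaskLoop, this), but easier to read.
    worker_ = std::make_unique<std::thread>([this] { TaskLoop(); });
  }
  ~Pool() {
    if (worker_ && worker_->joinable()) worker_->join();
  }

 private:
  void TaskLoop() {}
  std::unique_ptr<std::thread> worker_;
};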