Skip to content

Commit

Permalink
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Browse files Browse the repository at this point in the history
… project_fft
  • Loading branch information
chenfeiyu committed Sep 17, 2021
2 parents 76401f4 + fcfb0af commit f8c2a2e
Show file tree
Hide file tree
Showing 23 changed files with 428 additions and 392 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ cc_library(amp SRCS amp_auto_cast.cc DEPS layer )
cc_library(tracer SRCS tracer.cc DEPS layer engine program_desc_tracer amp denormal)
cc_library(basic_engine SRCS basic_engine.cc DEPS layer gradient_accumulator)
cc_library(engine SRCS basic_engine.cc partial_grad_engine.cc DEPS layer gradient_accumulator)
cc_library(imperative_profiler SRCS profiler.cc)
cc_library(imperative_profiler SRCS profiler.cc DEPS flags)
if(NOT WIN32)
if(WITH_NCCL OR WITH_RCCL)
cc_library(imperative_all_reduce SRCS all_reduce.cc DEPS collective_helper device_context selected_rows tensor)
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/imperative/flags.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,11 @@
// limitations under the License.

#include "paddle/fluid/imperative/flags.h"
#include "gflags/gflags.h"
#include "paddle/fluid/platform/flags.h"

DEFINE_uint64(dygraph_debug, 0,
"Debug level of dygraph. This flag is not "
"open to users");
PADDLE_DEFINE_EXPORTED_uint64(dygraph_debug, 0,
"Debug level of dygraph. This flag is not "
"open to users");

namespace paddle {
namespace imperative {
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/profiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@
#endif
#include <glog/logging.h>
#include <mutex> // NOLINT
#include "gflags/gflags.h"
#include "paddle/fluid/platform/flags.h"

DEFINE_string(
PADDLE_DEFINE_EXPORTED_string(
tracer_profile_fname, "xxgperf",
"Profiler filename for imperative tracer, which generated by gperftools."
"Only valid when compiled `WITH_PROFILER=ON`. Empty if disable.");
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/memory/allocation/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ cc_test(allocator_facade_abs_flags_test SRCS allocator_facade_abs_flags_test.cc

cc_test(allocator_facade_frac_flags_test SRCS allocator_facade_frac_flags_test.cc DEPS allocator_facade)

cc_library(auto_growth_best_fit_allocator SRCS auto_growth_best_fit_allocator.cc DEPS allocator aligned_allocator)
cc_library(auto_growth_best_fit_allocator SRCS auto_growth_best_fit_allocator.cc DEPS allocator aligned_allocator flags)
cc_test(auto_growth_best_fit_allocator_facade_test SRCS auto_growth_best_fit_allocator_facade_test.cc DEPS cpu_allocator auto_growth_best_fit_allocator)
cc_test(auto_growth_best_fit_allocator_test SRCS auto_growth_best_fit_allocator_test.cc DEPS auto_growth_best_fit_allocator)

Expand Down
9 changes: 5 additions & 4 deletions paddle/fluid/memory/allocation/allocator_facade.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,14 +37,15 @@
#endif
#include "paddle/fluid/platform/npu_info.h"

DEFINE_int64(
PADDLE_DEFINE_EXPORTED_int64(
gpu_allocator_retry_time, 10000,
"The retry time (milliseconds) when allocator fails "
"to allocate memory. No retry if this value is not greater than 0");

DEFINE_bool(use_system_allocator, false,
"Whether to use system allocator to allocate CPU and GPU memory. "
"Only used for unittests.");
PADDLE_DEFINE_EXPORTED_bool(
use_system_allocator, false,
"Whether to use system allocator to allocate CPU and GPU memory. "
"Only used for unittests.");

namespace paddle {
namespace memory {
Expand Down
27 changes: 15 additions & 12 deletions paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,21 @@
#include <algorithm>
#include <mutex> // NOLINT
#include "paddle/fluid/memory/allocation/aligned_allocator.h"

DEFINE_bool(free_idle_chunk, false,
"Whether to free idle chunk when each allocation is freed. "
"If false, all freed allocation would be cached to speed up next "
"allocation request. If true, no allocation would be cached. This "
"flag only works when FLAGS_allocator_strategy=auto_growth.");

DEFINE_bool(free_when_no_cache_hit, false,
"Whether to free idle chunks when no cache hit. If true, idle "
"chunk would be freed when no cache hit; if false, idle "
"chunk would be freed when out of memory occurs. This flag "
"only works when FLAGS_allocator_strategy=auto_growth.");
#include "paddle/fluid/platform/flags.h"

PADDLE_DEFINE_EXPORTED_READONLY_bool(
free_idle_chunk, false,
"Whether to free idle chunk when each allocation is freed. "
"If false, all freed allocation would be cached to speed up next "
"allocation request. If true, no allocation would be cached. This "
"flag only works when FLAGS_allocator_strategy=auto_growth.");

PADDLE_DEFINE_EXPORTED_READONLY_bool(
free_when_no_cache_hit, false,
"Whether to free idle chunks when no cache hit. If true, idle "
"chunk would be freed when no cache hit; if false, idle "
"chunk would be freed when out of memory occurs. This flag "
"only works when FLAGS_allocator_strategy=auto_growth.");

namespace paddle {
namespace memory {
Expand Down
13 changes: 7 additions & 6 deletions paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,13 @@
#include "paddle/fluid/platform/xpu/xpu_header.h"
#endif

DEFINE_bool(init_allocated_mem, false,
"It is a mistake that the values of the memory allocated by "
"BuddyAllocator are always zeroed in some op's implementation. "
"To find this error in time, we use init_allocated_mem to indicate "
"that initializing the allocated memory with a small value "
"during unit testing.");
PADDLE_DEFINE_EXPORTED_bool(
init_allocated_mem, false,
"It is a mistake that the values of the memory allocated by "
"BuddyAllocator are always zeroed in some op's implementation. "
"To find this error in time, we use init_allocated_mem to indicate "
"that initializing the allocated memory with a small value "
"during unit testing.");
DECLARE_double(fraction_of_gpu_memory_to_use);
DECLARE_uint64(initial_gpu_memory_in_mb);
DECLARE_uint64(reallocate_gpu_memory_in_mb);
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/operators/pscore/heter_listen_and_serv_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ limitations under the License. */
#include "paddle/fluid/operators/pscore/heter_listen_and_serv_op.h"
#include "paddle/fluid/framework/op_registry.h"

DEFINE_int32(rpc_send_thread_num, 12, "number of threads for rpc send");
PADDLE_DEFINE_EXPORTED_int32(rpc_send_thread_num, 12,
"number of threads for rpc send");

namespace paddle {
namespace operators {
Expand Down
30 changes: 15 additions & 15 deletions paddle/fluid/operators/reshape_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -249,11 +249,11 @@ class ReshapeOp : public framework::OperatorWithKernel {
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down Expand Up @@ -367,11 +367,11 @@ class ReshapeGradOp : public framework::OperatorWithKernel {
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down Expand Up @@ -558,11 +558,11 @@ class Reshape2GradOp : public framework::OperatorWithKernel {
ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down
40 changes: 20 additions & 20 deletions paddle/fluid/operators/squeeze_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -114,11 +114,11 @@ class SqueezeOp : public framework::OperatorWithKernel {
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand All @@ -141,11 +141,11 @@ class SqueezeGradOp : public framework::OperatorWithKernel {
ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down Expand Up @@ -242,11 +242,11 @@ class Squeeze2Op : public framework::OperatorWithKernel {
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down Expand Up @@ -288,11 +288,11 @@ class Squeeze2GradOp : public framework::OperatorWithKernel {
ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
framework::LibraryType::kMKLDNN);
}
// if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
// return framework::OpKernelType(input_data_type, ctx.GetPlace(),
// framework::DataLayout::kMKLDNN,
// framework::LibraryType::kMKLDNN);
// }
#endif
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/platform/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,13 @@ if (WITH_PYTHON)
endif(NOT WIN32)
endif()

cc_library(flags SRCS flags.cc DEPS gflags)
cc_library(flags SRCS flags.cc DEPS gflags boost)
cc_library(denormal SRCS denormal.cc DEPS)

cc_library(errors SRCS errors.cc DEPS error_codes_proto)
cc_test(errors_test SRCS errors_test.cc DEPS errors enforce)

set(enforce_deps flags errors boost)
set(enforce_deps flags errors boost flags)
if(WITH_GPU)
set(enforce_deps ${enforce_deps} external_error_proto)
endif()
Expand Down
9 changes: 6 additions & 3 deletions paddle/fluid/platform/cpu_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ limitations under the License. */
#endif // _WIN32

#include <algorithm>
#include "gflags/gflags.h"
#include "paddle/fluid/platform/flags.h"

DECLARE_double(fraction_of_cpu_memory_to_use);
DECLARE_uint64(initial_cpu_memory_in_mb);
Expand All @@ -42,7 +42,8 @@ DECLARE_double(fraction_of_cuda_pinned_memory_to_use);
// between host and device. Allocates too much would reduce the amount
// of memory available to the system for paging. So, by default, we
// should set false to use_pinned_memory.
DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory.");
PADDLE_DEFINE_EXPORTED_bool(use_pinned_memory, true,
"If set, allocate cpu pinned memory.");

namespace paddle {
namespace platform {
Expand All @@ -54,7 +55,9 @@ size_t CpuTotalPhysicalMemory() {
mib[1] = HW_MEMSIZE;
int64_t size = 0;
size_t len = sizeof(size);
if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) return (size_t)size;
if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) {
return static_cast<size_t>(size);
}
return 0L;
#elif defined(_WIN32)
MEMORYSTATUSEX sMeminfo;
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/platform/enforce.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/type_defs.h"
#endif
#include "paddle/fluid/platform/flags.h"

namespace paddle {
namespace platform {
Expand Down
Loading

1 comment on commit f8c2a2e

@paddle-bot-old
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Congratulations! Your pull request passed all required CI. You could ask reviewer(s) to approve and merge. 🎉

Please sign in to comment.