Fix cpplint errors with paddle/fluid/recordio #9688

Merged (2 commits, Apr 7, 2018).
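Summary of the change: the recordio sources and the inference tests they touch are reformatted to clang-format's Google style to silence cpplint; chunk_test.cc drops a `using namespace` directive in favor of fully qualified names; the snappystream and zlib include paths are routed through `${THIRD_PARTY_PATH}/install`; and four per-directory `.clang-format` files are deleted.

Below is a minimal sketch (not code from this PR) of the two Google-style conventions applied throughout the diff: access specifiers indented by one space, and function arguments bin-packed up to the 80-column limit instead of placed one per line. `Buffer` and `LoadModel` are hypothetical names used only for illustration.

// sketch.cc
#include <string>
#include <vector>

class Buffer {
 public:  // one-space indent, as in paddle/fluid/recordio/chunk.h below
  void Add(const std::string& buf) { records_.push_back(buf); }

 private:
  std::vector<std::string> records_;
};

// Packed argument style: wrap at the column limit, not one argument per line.
void LoadModel(const std::string& prog_filename,
               const std::string& param_filename, bool is_combined = false) {}

int main() {
  Buffer b;
  b.Add("record");
  LoadModel("__model_combined__", "__params_combined__", true);
  return 0;
}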
4 changes: 3 additions & 1 deletion cmake/external/snappystream.cmake
@@ -54,5 +54,7 @@ add_library(snappystream STATIC IMPORTED GLOBAL)
 set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION
              "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a")
 
-include_directories(${SNAPPYSTREAM_INCLUDE_DIR})
+include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) # For snappysteam to include its own headers.
+include_directories(${THIRD_PARTY_PATH}/install) # For Paddle to include snappy stream headers.
 
 add_dependencies(snappystream extern_snappystream)
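The two include_directories calls above serve different consumers: snappystream's own sources include their headers by bare name via ${SNAPPYSTREAM_INCLUDE_DIR}, while Paddle code now spells out the vendored path, which resolves against ${THIRD_PARTY_PATH}/install. A minimal sketch of a Paddle-side consumer (this file is not part of the PR; snappy::oSnappyStream is assumed from the snappystream library, matching its use in paddle/fluid/recordio/chunk.cc later in this diff):

// demo_snappy_io.cc (hypothetical)
#include "snappy_stream/include/snappystream.hpp"  // resolves via ${THIRD_PARTY_PATH}/install

#include <iostream>
#include <sstream>

int main() {
  std::stringstream sink;
  {
    snappy::oSnappyStream os(sink);  // compresses on write
    os << "hello, recordio";
  }  // destructor flushes the compressed frame
  std::cout << "compressed size: " << sink.str().size() << " bytes\n";
  return 0;
}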
3 changes: 2 additions & 1 deletion cmake/external/zlib.cmake
@@ -25,7 +25,8 @@ ELSE(WIN32)
     SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
 ENDIF(WIN32)
 
-INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) # For zlib code to include its own headers.
+INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include zlib.h.
 
 ExternalProject_Add(
     extern_zlib
File renamed without changes.
9 changes: 3 additions & 6 deletions paddle/fluid/inference/io.cc
@@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) {
   return false;
 }
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename) {
@@ -108,10 +107,8 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor& executor,
 }
 
 std::unique_ptr<framework::ProgramDesc> Load(
-    framework::Executor& executor,
-    framework::Scope& scope,
-    const std::string& prog_filename,
-    const std::string& param_filename) {
+    framework::Executor& executor, framework::Scope& scope,
+    const std::string& prog_filename, const std::string& param_filename) {
   std::string model_filename = prog_filename;
   std::string program_desc_str;
   ReadBinaryFile(model_filename, program_desc_str);
3 changes: 1 addition & 2 deletions paddle/fluid/inference/io.h
@@ -24,8 +24,7 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename);
@@ -30,8 +30,8 @@ TEST(inference, fit_a_line) {
   // The second dim of the input tensor should be 13
   // The input data should be >= 0
   int64_t batch_size = 10;
-  SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
+                     static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
 
@@ -35,10 +35,8 @@ TEST(inference, image_classification) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 3, 32, 32},
-                     static_cast<float>(0),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
+                     static_cast<float>(0), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
 
@@ -48,8 +46,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -59,8 +57,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
@@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) {
   int64_t predicate_dict_len = 3162;
   int64_t mark_dict_len = 2;
 
-  SetupLoDTensor(word,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(predicate,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(predicate_dict_len - 1));
-  SetupLoDTensor(ctx_n2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_n1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_0,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(mark,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
@@ -35,10 +35,8 @@ TEST(inference, recognize_digits) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [-1.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 1, 28, 28},
-                     static_cast<float>(-1),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
+                     static_cast<float>(-1), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
 
@@ -49,8 +47,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat, is_combined);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -60,8 +58,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat, is_combined);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
@@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) {
   paddle::framework::LoDTensor word_data, trg_word;
   paddle::framework::LoD lod{{0, 4, 10}};
 
-  SetupLoDTensor(
-      word_data, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      trg_word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
+  SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word_data);
@@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) {
   paddle::framework::LoD lod{{0, 4, 10}};
   int64_t word_dict_len = 5147;
 
-  SetupLoDTensor(words,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(words, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
25 changes: 8 additions & 17 deletions paddle/fluid/inference/tests/test_helper.h
@@ -19,9 +19,7 @@ limitations under the License. */
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 T lower,
-                 T upper) {
+                 paddle::framework::DDim dims, T lower, T upper) {
   srand(time(0));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   for (int i = 0; i < input.numel(); ++i) {
@@ -33,27 +31,23 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 std::vector<T>& data) {
+                 paddle::framework::DDim dims, std::vector<T>& data) {
   CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
 }
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::LoD& lod,
-                    T lower,
-                    T upper) {
+                    paddle::framework::LoD& lod, T lower, T upper) {
   input.set_lod(lod);
   int dim = lod[0][lod[0].size() - 1];
   SetupTensor<T>(input, {dim, 1}, lower, upper);
 }
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::DDim dims,
-                    paddle::framework::LoD lod,
+                    paddle::framework::DDim dims, paddle::framework::LoD lod,
                     std::vector<T>& data) {
   const size_t level = lod.size() - 1;
   CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));
@@ -92,8 +86,7 @@ template <typename Place>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                    std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
-                   const int repeat = 1,
-                   const bool is_combined = false) {
+                   const int repeat = 1, const bool is_combined = false) {
   // 1. Define place, executor, scope
   auto place = Place();
   auto executor = paddle::framework::Executor(place);
@@ -132,11 +125,9 @@ void TestInference(const std::string& dirname,
     // `fluid.io.save_inference_model`.
     std::string prog_filename = "__model_combined__";
     std::string param_filename = "__params_combined__";
-    inference_program =
-        paddle::inference::Load(executor,
-                                *scope,
-                                dirname + "/" + prog_filename,
-                                dirname + "/" + param_filename);
+    inference_program = paddle::inference::Load(
+        executor, *scope, dirname + "/" + prog_filename,
+        dirname + "/" + param_filename);
   } else {
     // Parameters are saved in separate files sited in the specified
     // `dirname`.
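For context, the branch above picks between Paddle's two Load overloads declared in paddle/fluid/inference/io.h. A minimal sketch of the dispatch; the helper LoadFor is hypothetical, and the directory-only overload Load(executor, scope, dirname) is assumed from io.h:

// load_for.cc (hypothetical helper, not part of the PR)
#include <memory>
#include <string>

#include "paddle/fluid/inference/io.h"  // declares paddle::inference::Load

std::unique_ptr<paddle::framework::ProgramDesc> LoadFor(
    paddle::framework::Executor& executor, paddle::framework::Scope& scope,
    const std::string& dirname, bool is_combined) {
  if (is_combined) {
    // One serialized program plus one packed parameter file, as written by
    // `fluid.io.save_inference_model` when filenames are given.
    return paddle::inference::Load(executor, scope,
                                   dirname + "/__model_combined__",
                                   dirname + "/__params_combined__");
  }
  // Otherwise "__model__" plus one file per persistable variable in dirname.
  return paddle::inference::Load(executor, scope, dirname);
}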
5 changes: 0 additions & 5 deletions paddle/fluid/memory/.clang-format

This file was deleted.

5 changes: 0 additions & 5 deletions paddle/fluid/operators/.clang-format

This file was deleted.

5 changes: 0 additions & 5 deletions paddle/fluid/platform/.clang-format

This file was deleted.

5 changes: 0 additions & 5 deletions paddle/fluid/pybind/.clang-format

This file was deleted.
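Deleting these per-directory .clang-format overrides leaves the repository-root configuration in charge, which is what produces the Google-style formatting throughout this diff. As a rough sketch only (Paddle's actual root config may set more options), a config of this shape yields the one-space access specifiers and bin-packed arguments seen above:

# .clang-format (sketch; not the PR's file)
Language: Cpp
BasedOnStyle: Google   # implies AccessModifierOffset: -1, BinPackArguments
                       # and BinPackParameters: true, ColumnLimit: 80
Standard: Cpp11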

14 changes: 8 additions & 6 deletions paddle/fluid/recordio/chunk.cc
@@ -14,11 +14,13 @@
 
 #include "paddle/fluid/recordio/chunk.h"
 
+#include <algorithm>
+#include <memory>
+#include <sstream>
+
 #include "paddle/fluid/platform/enforce.h"
-#include "snappystream.hpp"
-#include "zlib.h"
+#include "snappy_stream/include/snappystream.hpp"
+#include "zlib/include/zlib.h"
 
 namespace paddle {
 namespace recordio {
@@ -58,8 +60,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) {
  * Copy stream in to another stream
  */
 static void PipeStream(std::istream& in, std::ostream& os) {
-  ReadStreamByBuf(
-      in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); });
+  ReadStreamByBuf(in, 0,
+                  [&os](const char* buf, size_t len) { os.write(buf, len); });
 }
 
 /**
@@ -68,8 +70,8 @@ static void PipeStream(std::istream& in, std::ostream& os) {
 static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) {
   uint32_t crc = static_cast<uint32_t>(crc32(0, nullptr, 0));
   ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) {
-    crc = static_cast<uint32_t>(crc32(
-        crc, reinterpret_cast<const Bytef*>(buf), static_cast<uInt>(len)));
+    crc = static_cast<uint32_t>(crc32(crc, reinterpret_cast<const Bytef*>(buf),
+                                      static_cast<uInt>(len)));
   });
   return crc;
 }
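The Crc32Stream helper above folds each buffered read into zlib's incremental crc32. A minimal standalone sketch of that pattern, using zlib's crc32(crc, buf, len) API (the input strings are arbitrary):

// crc32_demo.cc
#include <cstdio>
#include <cstring>
#include <zlib.h>

int main() {
  // Seed exactly as Crc32Stream does: crc32(0, nullptr, 0).
  uLong crc = crc32(0L, Z_NULL, 0);
  // Feed the data in chunks; the running value is passed back in each call.
  const char* chunks[] = {"hello, ", "recordio"};
  for (const char* c : chunks) {
    crc = crc32(crc, reinterpret_cast<const Bytef*>(c),
                static_cast<uInt>(std::strlen(c)));
  }
  std::printf("crc32 = %08lx\n", crc);
  return 0;
}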
4 changes: 2 additions & 2 deletions paddle/fluid/recordio/chunk.h
@@ -24,7 +24,7 @@ namespace recordio {
 
 // A Chunk contains the Header and optionally compressed records.
 class Chunk {
-public:
+ public:
   Chunk() : num_bytes_(0) {}
   void Add(const std::string& buf) {
     num_bytes_ += buf.size();
@@ -46,7 +46,7 @@ class Chunk {
 
   bool Empty() const { return records_.empty(); }
 
-private:
+ private:
   std::vector<std::string> records_;
   // sum of record lengths in bytes.
   size_t num_bytes_;
12 changes: 5 additions & 7 deletions paddle/fluid/recordio/chunk_test.cc
@@ -18,29 +18,27 @@
 
 #include "gtest/gtest.h"
 
-using namespace paddle::recordio;
-
 TEST(Chunk, SaveLoad) {
-  Chunk ch;
+  paddle::recordio::Chunk ch;
   ch.Add(std::string("12345", 6));
   ch.Add(std::string("123", 4));
   std::stringstream ss;
-  ch.Write(ss, Compressor::kNoCompress);
+  ch.Write(ss, paddle::recordio::Compressor::kNoCompress);
   ss.seekg(0);
   ch.Parse(ss);
   ASSERT_EQ(ch.NumBytes(), 10U);
 }
 
 TEST(Chunk, Compressor) {
-  Chunk ch;
+  paddle::recordio::Chunk ch;
   ch.Add(std::string("12345", 6));
   ch.Add(std::string("123", 4));
   ch.Add(std::string("123", 4));
   ch.Add(std::string("123", 4));
   std::stringstream ss;
-  ch.Write(ss, Compressor::kSnappy);
+  ch.Write(ss, paddle::recordio::Compressor::kSnappy);
   std::stringstream ss2;
-  ch.Write(ss2, Compressor::kNoCompress);
+  ch.Write(ss2, paddle::recordio::Compressor::kNoCompress);
   ASSERT_LE(ss.tellp(), ss2.tellp());  // Compress should contain less data;
 
   ch.Clear();
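The chunk_test.cc hunk above is the one change in this PR that goes beyond reformatting: cpplint's build/namespaces check forbids using-directives, so the directive is dropped and every use is fully qualified. A minimal sketch of the pattern with hypothetical names (Magic is not Paddle code):

// namespaces_demo.cc
#include <cassert>
#include <string>

namespace paddle {
namespace recordio {
inline std::string Magic() { return "RIO"; }
}  // namespace recordio
}  // namespace paddle

// using namespace paddle::recordio;  // cpplint: build/namespaces violation

int main() {
  // Qualify each use instead of importing the namespace wholesale.
  assert(paddle::recordio::Magic() == "RIO");
  return 0;
}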