caffe.cpp
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#include <TH/TH.h>
#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"  // caffe_copy / caffe_set
extern "C"
{
void* loadCaffeNet(const char* param_file, const char* model_file);
void releaseCaffeNet(void* net_);
void saveCaffeNet(void* net_, const char* weight_file);
void writeCaffeConvLayer(void* net, const char* layername, THFloatTensor* weights, THFloatTensor* bias);
void writeCaffeLinearLayer(void* net, const char* layername, THFloatTensor* weights, THFloatTensor* bias);
void writeCaffeBNLayer(void* net, const char* layername, THFloatTensor* mean, THFloatTensor* var);
void writeCaffeScaleLayer(void* net, const char* layername, THFloatTensor* weights, THFloatTensor* bias);
}
typedef float Dtype;
using namespace caffe; // NOLINT(build/namespaces)
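// Note: the flat element-wise copies below are valid because, for the
// standard ungrouped case, Torch and Caffe use the same row-major parameter
// layouts: convolution weights are (num_output, channels, kH, kW) in both
// frameworks, and linear/InnerProduct weights are (output_size, input_size).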
// Build a Net in TEST phase from a prototxt; optionally initialize it from a
// trained .caffemodel. Returns an opaque handle used by the other C functions.
void* loadCaffeNet(const char* param_file, const char* model_file) {
  Net<Dtype>* net = new Net<Dtype>(string(param_file), TEST);
  if (model_file != NULL)
    net->CopyTrainedLayersFrom(string(model_file));
  return net;
}
// Free a net created by loadCaffeNet.
void releaseCaffeNet(void* net_) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  if (net != NULL) {
    delete net;
  }
}
// Serialize the net's current weights to a binary .caffemodel file.
void saveCaffeNet(void* net_, const char* weight_file) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  NetParameter net_param;
  net->ToProto(&net_param);
  WriteProtoToBinaryFile(net_param, std::string(weight_file));
}
// Total number of elements in a THFloatTensor (product of all dimension
// sizes), e.g. a 64x3x7x7 weight tensor has 9408 elements.
int getTHTensorSize(THFloatTensor* tensor) {
  int size = tensor->size[0];
  for (int i = 1; i < tensor->nDimension; i++) {
    size = size * tensor->size[i];
  }
  return size;
}
// Copy Torch BatchNormalization running statistics into a Caffe BatchNorm
// layer. Torch keeps two parameters (running mean and running var) while
// Caffe keeps three blobs (mean, variance, and a moving-average scale
// factor), so the scale factor is set to 1 to use the statistics unscaled.
// Assumes the Torch tensors are contiguous.
void writeCaffeBNLayer(void* net_, const char* layerName, THFloatTensor* mean, THFloatTensor* var) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  const boost::shared_ptr<caffe::Layer<Dtype> > inLayer = net->layer_by_name(std::string(layerName));
  vector<shared_ptr<Blob<Dtype> > > blobs = inLayer->blobs();
  // Check sizes
  CHECK_EQ(blobs.size(), 3);
  CHECK_EQ(getTHTensorSize(mean), blobs[0]->count());
  CHECK_EQ(getTHTensorSize(var), blobs[1]->count());
  // Convert Torch's 2 parameters to Caffe's 3 blobs
  const float* mean_ptr = THFloatTensor_data(mean);
  const float* var_ptr = THFloatTensor_data(var);
  caffe_set(blobs[2]->count(), 1.0f, blobs[2]->mutable_cpu_data());
  caffe_copy(blobs[0]->count(), mean_ptr, blobs[0]->mutable_cpu_data());
  caffe_copy(blobs[1]->count(), var_ptr, blobs[1]->mutable_cpu_data());
}
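// For reference: at inference time Caffe's BatchNormLayer divides the stored
// statistics by the scale factor, i.e. it effectively uses
//   mean[i]     = blobs[0][i] / blobs[2][0]
//   variance[i] = blobs[1][i] / blobs[2][0]
// so writing a scale factor of 1 above makes the Torch running statistics be
// used exactly as stored.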
// Copy Torch affine parameters (gamma/beta) into a Caffe Scale layer, the
// learned scale and bias that follow a BatchNorm layer. Assumes the Scale
// layer was declared with bias_term: true.
void writeCaffeScaleLayer(void* net_, const char* layerName, THFloatTensor* weights, THFloatTensor* bias) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  const boost::shared_ptr<caffe::Layer<Dtype> > inLayer = net->layer_by_name(std::string(layerName));
  vector<shared_ptr<Blob<Dtype> > > blobs = inLayer->blobs();
  // Check sizes
  CHECK_EQ(blobs.size(), 2);
  CHECK_EQ(getTHTensorSize(weights), blobs[0]->count());
  CHECK_EQ(getTHTensorSize(bias), blobs[1]->count());
  // Copy data
  const float* data_ptr = THFloatTensor_data(weights);
  caffe_copy(blobs[0]->count(), data_ptr, blobs[0]->mutable_cpu_data());
  data_ptr = THFloatTensor_data(bias);
  caffe_copy(blobs[1]->count(), data_ptr, blobs[1]->mutable_cpu_data());
}
// Copy Torch convolution weights and bias into a Caffe Convolution layer.
void writeCaffeConvLayer(void* net_, const char* layerName, THFloatTensor* weights, THFloatTensor* bias) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  const boost::shared_ptr<caffe::Layer<Dtype> > inLayer = net->layer_by_name(std::string(layerName));
  vector<shared_ptr<Blob<Dtype> > > blobs = inLayer->blobs();
  // A convolution layer with a bias term has exactly two parameter blobs
  if (blobs.size() != 2) {
    std::ostringstream oss;
    oss << "Can't write into layer: " << layerName;
    THError(oss.str().c_str());
  }
  // Check sizes
  CHECK_EQ(getTHTensorSize(weights), blobs[0]->count());
  CHECK_EQ(getTHTensorSize(bias), blobs[1]->count());
  // Copy data
  const float* data_ptr = THFloatTensor_data(weights);
  caffe_copy(blobs[0]->count(), data_ptr, blobs[0]->mutable_cpu_data());
  data_ptr = THFloatTensor_data(bias);
  caffe_copy(blobs[1]->count(), data_ptr, blobs[1]->mutable_cpu_data());
}
// Copy Torch nn.Linear weights and bias into a Caffe InnerProduct layer.
void writeCaffeLinearLayer(void* net_, const char* layerName, THFloatTensor* weights, THFloatTensor* bias) {
  Net<Dtype>* net = (Net<Dtype>*)net_;
  const boost::shared_ptr<caffe::Layer<Dtype> > inLayer = net->layer_by_name(std::string(layerName));
  vector<shared_ptr<Blob<Dtype> > > blobs = inLayer->blobs();
  // An InnerProduct layer with a bias term has exactly two parameter blobs
  if (blobs.size() != 2) {
    std::ostringstream oss;
    oss << "Can't write into layer: " << layerName;
    THError(oss.str().c_str());
  }
  // Check sizes: the Torch weight is 2-D (output_size x input_size)
  unsigned int th_weights_size = weights->size[0] * weights->size[1];
  CHECK_EQ(th_weights_size, blobs[0]->count());
  unsigned int th_bias_size = bias->size[0];
  CHECK_EQ(th_bias_size, blobs[1]->count());
  // Copy data
  const float* data_ptr = THFloatTensor_data(weights);
  caffe_copy(blobs[0]->count(), data_ptr, blobs[0]->mutable_cpu_data());
  data_ptr = THFloatTensor_data(bias);
  caffe_copy(blobs[1]->count(), data_ptr, blobs[1]->mutable_cpu_data());
}
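// --- Usage sketch (illustrative addition, not part of the original file) ---
// A minimal driver showing how a host program might call this API: load a
// net, push Torch-side conv parameters into a named layer, save the result,
// and release the net. The file names, the layer name "conv1", and the
// tensor shapes are hypothetical.
int main() {
  void* net = loadCaffeNet("deploy.prototxt", NULL);
  // Torch tensors shaped like conv1's blobs: 64 filters of size 3x7x7.
  THFloatTensor* w = THFloatTensor_newWithSize4d(64, 3, 7, 7);
  THFloatTensor* b = THFloatTensor_newWithSize1d(64);
  THFloatTensor_fill(w, 0.01f);
  THFloatTensor_fill(b, 0.0f);
  writeCaffeConvLayer(net, "conv1", w, b);
  saveCaffeNet(net, "converted.caffemodel");
  THFloatTensor_free(w);
  THFloatTensor_free(b);
  releaseCaffeNet(net);
  return 0;
}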