%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Alkim GOKCEN - Contact: alkim.gokcen@outlook.com,
% FeedForwardNeuralNetwork a.gokcen@baylanwatermeters.com,
% y190207003@ogr.ikc.edu.tr
% University of Izmir Katip Celebi, Institute of Applied Sciences, EEE
% Baylan Watermeters, Research & Development Department
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Syntax ------------------------------------------------------------------
% ** input is a matrix of size MxN, where M is the # of features and N is
%    the # of samples
% ** output is a matrix of size KxN, where K is the # of outputs and N is
%    the # of samples
% ** neuron is a scalar value that gives the # of hidden-layer neurons
% ** minMSE is the minimum training error that should be achieved
% ** minGRAD is the minimum gradient vector norm
% ** feature is the # of features
% ** nin is the # of hidden-layer neurons
% ** nout is the # of outputs
% ** iter is the epoch counter
% ** uK is the Hessian scaler (Levenberg-Marquardt damping factor)
% ** uscale, umin, umax control how the Hessian scaler is updated
% ** findJacobian() computes the Jacobian
% ** vectorizationGradient() stacks the weight matrices into one vector
% ** devecotrization() unpacks that vector back into weight matrices
% ** pk, zk, param are the candidate step (pk), the candidate point (zk),
%    and the coefficient vector (param)
% ** costx is the training cost
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
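% Usage (a minimal sketch; the data below is illustrative only):
%   X = rand(2, 200);                         % 2 features, 200 samples
%   Y = sin(pi*X(1,:)) .* X(2,:);             % 1 output, 200 samples
%   [Win, Wout, bin, bout, nin, pred] = ffnnetwork(X, Y, 10, 1e-3, 1e-6);
%   plot(Y, 'b'); hold on; plot(pred, 'r--'); % targets vs. network fit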
function [Win, Wout, bin, bout, nin, pred] = ffnnetwork(input, output, neuron, minMSE, minGRAD)
X = input;
Y = output;
[feature, ~] = size(X); % feature is the # of input features
[nout, ~] = size(Y); % nout is the # of outputs
nin = neuron; % number of hidden-layer neurons
Win = rand(nin, feature); % input-layer weight matrix
bin = rand(nin, 1); % input-layer bias vector
Wout = rand(nout, nin); % output-layer weight matrix
bout = rand(nout, 1); % output-layer bias vector
[~, error,netout,netin] = yprediction(X,Win,bin,Wout,bout,Y);
iter = 0;
uK = 1.01;
uscale = 10;
umin = 1e-10;
umax = 1e+10;
costx = 0.5*sum(error.^2);
loop1 = 1;
while loop1
iter = iter +1;
param = vectorizationGradient(Win,Wout,bin,bout,nin,feature,nout);
J = findJacobian(netout, netin, Wout, X, nin, feature, nout);
loop2 = 1;
while loop2
    % Levenberg-Marquardt candidate step: damped Gauss-Newton direction
    pk = -(J'*J + uK*eye(size(J,2))) \ (J'*error');
    zk = param + pk;
    [Wc, Wcout, bc, bcout] = devecotrization(zk, nin, nout, feature);
    [~, errorz, netout, netin] = yprediction(X, Wc, bc, Wcout, bcout, Y);
    costz = 0.5*sum(errorz.^2);
    if costz < costx
        % Accept the candidate step and relax the damping factor. A line
        % search could refine the step length here, e.g.:
        % [~,sk,~,~,~,~] = goldenSection(LowerLimit,UpperLimit,1e-10,Y,nin,param,pk,feature,X);
        % sk = newtonRhapson(param,pk,X,Y,nin,feature);
        param = zk;
        Win = Wc; Wout = Wcout; bin = bc; bout = bcout;
        error = errorz;
        costx = costz;
        uK = uK/uscale;
        loop2 = 0;
        clc;
        disp('Cost');
        disp(costx);
    else
        % Reject the step: keep the current weights and increase damping
        uK = uK*uscale;
    end
    if (uK < umin) || (umax < uK)
        loop1 = 0;
        loop2 = 0;
    end
end
    % stop when both the cost and the gradient of the 0.5*sum-of-squares
    % cost (J'*error') are small enough
    if costx < minMSE && norm(J'*error') < minGRAD
        loop1 = 0;
    end
end
% report the prediction of the final accepted weights
[pred, ~, ~, ~] = yprediction(X, Win, bin, Wout, bout, Y);
end
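
% NOTE: findJacobian() is not defined in this file (it lives elsewhere in
% the repository). The function below is a hedged sketch of what the call
% above assumes -- a single linear output (nout == 1) and the parameter
% ordering [Win(:); bin; Wout(:); bout] -- not necessarily the author's
% actual implementation.
function J = findJacobian(netout, netin, Wout, X, nin, feature, nout)
% Jacobian of the residual e = Y - ypred with respect to the parameter
% vector; netout is unused here because the output layer is linear
% (hprimeOut == 1).
N = size(X, 2);
P = nin*feature + nin + nout*nin + nout;
J = zeros(N, P);
a = h(netin);                % nin x N hidden activations
s = Wout' .* hprime(netin);  % nin x N: Wout(1,j)*h'(netin(j,n))
for n = 1:N
    dWin = s(:,n) * X(:,n)'; % nin x feature: d(ypred)/d(Win)
    J(n,:) = -[dWin(:); s(:,n); a(:,n); 1]';
end
end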
function y = h(x)
% hidden-layer activation: tansig (equivalent to tanh)
y = tansig(x);
end
function y = hprime(x)
% derivative of the hidden-layer activation
y = (1 - tansig(x).^2);
end
function y = hOut(x)
% output-layer activation: linear
y = x;
end
function y = hprimeOut(x)
% derivative of the output-layer activation
y = ones(size(x));
end
function [ypred, error, netout, netin] = yprediction(X, Win, bin, Wout, bout, Y)
% forward pass; the bias vectors are added column-wise via implicit expansion
netin = Win*X + bin;           % hidden-layer pre-activation, nin x N
netout = Wout*h(netin) + bout; % output-layer pre-activation, nout x N
ypred = hOut(netout);          % network prediction
error = Y - ypred;             % residual used by the LM update
end
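
% NOTE: vectorizationGradient() and devecotrization() are also defined
% elsewhere in the repository. The pair below is a sketch consistent with
% the parameter ordering assumed in the findJacobian sketch above; the
% author's actual helpers may use a different ordering.
function param = vectorizationGradient(Win, Wout, bin, bout, nin, feature, nout)
% stack all weights and biases into one column parameter vector
% (the size arguments are unused under this ordering)
param = [Win(:); bin; Wout(:); bout];
end
function [Win, Wout, bin, bout] = devecotrization(zk, nin, nout, feature)
% inverse of vectorizationGradient: unpack the parameter vector back into
% the weight matrices and bias vectors
nW = nin*feature;
Win  = reshape(zk(1:nW), nin, feature);
bin  = zk(nW+1 : nW+nin);
Wout = reshape(zk(nW+nin+1 : nW+nin+nout*nin), nout, nin);
bout = zk(nW+nin+nout*nin+1 : end);
end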