import numpy as np
import sys
import os
import matplotlib.pyplot as plt
import time
import math
# The sign (step) function. The gradient-descent version only needs it to
# threshold the tanh output into a class label.
def sign(value):
    if value > 0:
        return 1
    elif value == 0:
        return 0
    else:
        return -1
def accuracytest(predicted, actual):
    total = len(predicted)
    correct = 0.0
    # Count how many predictions match the actual labels.
    for i in range(len(predicted)):
        if predicted[i] == actual[i]:
            correct += 1
    return correct, total
############## Gradient descent method #########################################
# This performs the gradient-descent version of the perceptron.
# Input is the nth data vector and the weight vector.
def predict(data_vector, weights):
    wtx = np.dot(weights, data_vector)
    # tanh activation: (e^a - e^-a)/(e^a + e^-a) = tanh(a); math.tanh avoids
    # overflow in the explicit exponentials for large |wtx|.
    tn = math.tanh(wtx)
    return tn, wtx
def gprime(wtx):
    # Derivative of the tanh activation: g'(a) = 4e^(2a)/(e^(2a)+1)^2,
    # which simplifies to 1 - tanh(a)^2 and cannot overflow.
    return 1.0 - math.tanh(wtx) ** 2
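# Quick sanity check for gprime (a sketch): compare it against a central-difference
# numerical derivative of tanh; the test point and tolerance are arbitrary choices.
def _check_gprime(a=0.7, eps=1e-6):
    numeric = (math.tanh(a + eps) - math.tanh(a - eps)) / (2 * eps)
    assert abs(gprime(a) - numeric) < 1e-6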
def adjustweights(data_vector, tn, yn, wtx, weights, learn):
    # Gradient step on the squared error 0.5*(tn - yn)^2 with tn = g(w.x):
    # w = w - learn*(tn - yn)*g'(w.x)*x
    return np.subtract(weights, learn * (tn - yn) * gprime(wtx) * data_vector)
def update_progress(progress, runs):
    # Simple text progress bar: one '#' per five completed runs, plus a percentage.
    sys.stdout.write('\r[{0}] {1:.0f}%'.format('#' * int(progress / (5.0 / runs)), progress * 100))
    sys.stdout.flush()
def gradienttrain(data_matrix, real_classes, runs):
    # Initialize the weight vector at all zeros. Its length is the number of
    # attributes, plus one extra entry for the bias term b.
    weights = np.zeros(data_matrix.shape[1] + 1)
    predicted = []
    curr_run = 1
    best_accuracy = 0
    best_weights = None
    progress = 0
    update_progress(progress, runs)
    # Loop through the training matrix as many times as `runs` specifies,
    # keeping the weights from the most accurate pass.
    while curr_run <= runs:
        for obsindex in range(data_matrix.shape[0]):
            # Append a constant 1 so the last weight acts as the bias b.
            data_vector = np.insert(data_matrix[obsindex], data_matrix[obsindex].shape[0], 1.0)
            tn, wtx = predict(data_vector, weights)
            predicted.append(sign(tn))
            weights = adjustweights(data_vector, tn, real_classes[obsindex], wtx, weights, 0.2)
        correct, total = accuracytest(predicted, real_classes)
        accuracy = correct / total
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_weights = weights
        curr_run += 1
        predicted = []
        progress += 1.0 / runs
        update_progress(progress, runs)
        # Shuffling here would randomize the visit order between runs, but it
        # stays disabled: real_classes would have to be shuffled in tandem to
        # keep the labels aligned with the rows.
        # np.random.shuffle(data_matrix)
    # print("\nAfter %d runs, the best accuracy was %s" % (runs, str(round(best_accuracy, 2))))
    # Return the best weights for classification.
    return best_weights
def classify(test_matrix, test_class, weights):
    predicted = []
    for obsindex in range(test_matrix.shape[0]):
        # Append the same constant-1 bias feature used during training.
        test_vector = np.insert(test_matrix[obsindex], test_matrix[obsindex].shape[0], 1.0)
        tn, wtx = predict(test_vector, weights)
        predicted.append(sign(tn))
    correct, total = accuracytest(predicted, test_class)
    accuracy = correct / total
    # print("For this classifier the accuracy on the test set was %s" % str(round(accuracy, 2)))
    return round(accuracy, 4) * 100
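# Minimal usage sketch for the gradient-descent trainer, assuming a 2-D NumPy
# feature matrix and +/-1 labels (the Gaussian blobs below are hypothetical,
# and the training data is reused as the test set purely for brevity):
def _demo_gradient_perceptron():
    rng = np.random.RandomState(0)
    # Two separable Gaussian blobs, labelled +1 and -1.
    data = np.vstack([rng.randn(50, 2) + 2.0, rng.randn(50, 2) - 2.0])
    labels = np.asarray([1] * 50 + [-1] * 50)
    weights = gradienttrain(data, labels, 20)
    print("\nTraining-set accuracy: %s%%" % classify(data, labels, weights))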
############################### Normal Perceptron Method ###############################################
def normpredict(data_vector, weights):
    wtx = np.dot(weights, data_vector)
    tn = sign(wtx)
    return tn, wtx
def normadjustweights(data_vector, tn, yn, wtx, weights):
    # Classic perceptron update: only on a misclassification, w = w + yn*x.
    if tn != yn:
        weights = np.add(weights, yn * data_vector)
    return weights
def normtrain(data_matrix, real_classes, runs):
    # Initialize the weight vector at all zeros. Its length is the number of attributes.
    weights = np.zeros(data_matrix.shape[1])
    predicted = []
    curr_run = 1
    # print(normpredict(data_matrix[1], weights))
    # Loop through the training matrix as many times as `runs` specifies to keep refining the weights.
    while curr_run <= runs:
        for obsindex in range(data_matrix.shape[0]):
            tn, wtx = normpredict(data_matrix[obsindex], weights)
            predicted.append(tn)
            weights = normadjustweights(data_matrix[obsindex], tn, real_classes[obsindex], wtx, weights)
        correct, total = accuracytest(predicted, real_classes)
        accuracy = correct / total
        print("Run number %d had an accuracy of %s" % (curr_run, str(accuracy)))
        curr_run += 1
        predicted = []
    # Return the weights for classification.
    return weights
# python /home/chris/Documents/Machine_Learning/Perception_SVM.py
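
# A minimal driver sketch, assuming the same kind of synthetic 2-D data with
# +/-1 labels as the demo above; it exercises both trainers.
if __name__ == "__main__":
    _demo_gradient_perceptron()
    rng = np.random.RandomState(1)
    data = np.vstack([rng.randn(50, 2) + 2.0, rng.randn(50, 2) - 2.0])
    labels = np.asarray([1] * 50 + [-1] * 50)
    # Classic perceptron: prints per-run accuracy and returns the final weights.
    normtrain(data, labels, 10)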