#!/usr/bin/python
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
import re
import pandas as pd
from collections import Counter
from sklearn.svm import SVR
import itertools
from sklearn.metrics import mean_squared_error
from sklearn.svm import NuSVR
from sklearn.neural_network import MLPRegressor
import ann
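# ann is a local module; from its use below it is expected to provide build_pmodel(),
# which returns a Keras-style regression model (fit / predict / save).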
def softmax(x):
    return np.exp(x) / np.sum(np.exp(x), axis=0)
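# Note: softmax is defined but not used in the rest of this script.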
data = pd.read_csv('datasets/final_1.csv', delimiter=' ')
C = data[['cluster','engagement', 'length','robot_feedback', 'previous_score', 'current_score' , 'current_result', 'action']]
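# Columns of interest: cluster label, state features, and game outcome columns.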
# cluster data
C0 = C.loc[C['cluster']==0]
C1 = C.loc[C['cluster']==1]
C2 = C.loc[C['cluster']==2]
P0 = C0[['engagement', 'length','robot_feedback', 'previous_score', 'current_result']]
P1 = C1[['engagement', 'length','robot_feedback', 'previous_score', 'current_result']]
P2 = C2[['engagement', 'length','robot_feedback', 'previous_score', 'current_result']]
a0 = P0.groupby(['length','robot_feedback', 'previous_score'])
a1 = P1.groupby(['length','robot_feedback', 'previous_score'])
a2 = P2.groupby(['length','robot_feedback', 'previous_score'])
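# Discretised state space. D and S are the raw length and previous-score values that
# appear in the grouped keys; L and PS are their normalised counterparts, and RF is a
# one-hot encoding of the three robot feedback types.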
D = [3,5,7,9]
S = [-4,-3,-2,-1,0,1,2,3,4]
L = [0.25, 0.5, 0.75, 1.0]
RF = [[1.0,0.0,0.0], [0.0,1.0,0.0], [0.0,0.0,1.0]]
PS = [-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0]
combs = (L, RF, PS)
states = list(itertools.product(*combs))
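# All (length, robot_feedback, previous_score) combinations: 4 * 3 * 9 = 108 states.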
state_level = []
tmp = 0
for i, s in enumerate(states):
    if tmp < s[0]:
        state_level.append(i)
        tmp = s[0]
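# state_level holds the index at which each new length level begins; it is used below
# to draw the shaded background bands (27 states per length level).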
for ii, cluster in enumerate([a0, a1, a2]):
    train_X = []
    train_Y = []
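    # Build the training set: one sample per observed state, with the empirical win
    # rate in that state as the regression target.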
    for key, item in cluster:
        A = cluster.get_group(key)
        wins = len(A.loc[A['current_result'] == 1])
        losses = len(A.loc[A['current_result'] == -1])
        if wins == 0:
            p = 0.0
        elif losses == 0:
            p = 1.0
        else:
            p = wins / float(wins + losses)
        training = [L[D.index(key[0])], RF[key[1]][0], RF[key[1]][1], RF[key[1]][2], PS[S.index(key[2])]]
        target = p
        train_X.append(training)
        train_Y.append(target)
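    # Alternative regressors (NuSVR, MLPRegressor), left commented out: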
    #svr = NuSVR(kernel='rbf', C=10.0)
    #svr.fit(train_X, train_Y)
    #svr_prediction = svr.predict(train_X)
    #svr_mse = mean_squared_error(train_Y, svr_prediction)
    #svr_rmse1 = np.sqrt(svr_mse)
    #print("\t\tTraining Data: SVR Root Mean Square Error = {0:0.2f}".format(svr_rmse1))
    #NN = MLPRegressor(activation='logistic', max_iter=5000, solver='lbfgs', hidden_layer_sizes=(3, 6), random_state=42)
    #NN.fit(train_X, train_Y)
    #NN_prediction = NN.predict(train_X)
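    # Fit the neural-network regressor provided by the local ann module.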
    N = ann.build_pmodel()
    x = np.asarray(train_X)
    y = np.asarray(train_Y)
    N.fit(x, y, epochs=10000, verbose=0)
    print(ii)
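    # Predict the expected performance for every state in the discretised state space.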
    preds = []
    for s in states:
        #inputs.append([s[0], s[1][0], s[1][1], s[1][2], s[2]])
        preds.append(N.predict(np.asarray([s[0], s[1][0], s[1][1], s[1][2], s[2]]).reshape(1, 5))[0][0])
    #preds = N.predict(np.asarray(inputs))[0][0]
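    # Plot the regression curve against the empirical win rates; the translucent bars
    # mark the four length levels.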
    plt.bar(state_level, [1.2, 1.2, 1.2, 1.2], width=27, color=['k', 'w', 'k', 'w'], alpha=0.1)
    plt.plot(preds, label='regression values')
    first = 1
    for a, b in zip(train_X, train_Y):
        if first:
            plt.plot(states.index(tuple([a[0], [a[1], a[2], a[3]], a[4]])), b, 'or', label='real values')
            first = 0
        else:
            plt.plot(states.index(tuple([a[0], [a[1], a[2], a[3]], a[4]])), b, 'or')
    plt.legend()
    plt.savefig('simulation/performance_c' + str(ii) + '.png')
    plt.close()
    N.save('simulation/user' + str(ii) + '_performance.h5')