forked from wazeerzulfikar/alzheimers-dementia
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmodels.py
112 lines (92 loc) · 4.7 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
def create_intervention_model(task, longest_speaker_length):
    """Build the LSTM model over per-speaker intervention sequences.

    Args:
        task: 'classification' (2-way softmax head) or 'regression'
            (scalar head clipped to [0, 30] via ReLU(max_value=30) —
            presumably the MMSE score range; TODO confirm).
        longest_speaker_length: number of timesteps per sequence; each
            timestep carries 3 features.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.

    Raises:
        ValueError: if ``task`` is not 'classification' or 'regression'.
    """
    model = tf.keras.Sequential()
    # Explicit Input layer for consistency with the other builders in
    # this file (instead of the legacy `input_shape=` kwarg on LSTM).
    model.add(layers.Input(shape=(longest_speaker_length, 3)))
    model.add(layers.LSTM(16))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.BatchNormalization())
    if task == 'classification':
        model.add(layers.Dense(2, activation='softmax'))
    elif task == 'regression':
        model.add(layers.Dense(16, activation='relu'))
        model.add(layers.Dense(8, activation='relu'))
        model.add(layers.Dense(1))
        # ReLU with max_value both floors predictions at 0 and caps at 30.
        model.add(layers.ReLU(max_value=30))
    else:
        # Previously an unknown task silently produced a model with no
        # output head; fail loudly instead.
        raise ValueError(f"task must be 'classification' or 'regression', got {task!r}")
    return model
def create_pause_model(task, n_features):
    """Build a dense network over a flat vector of pause features.

    Args:
        task: 'classification' (regularized 2-way softmax head) or
            'regression' (scalar head clipped to [0, 30]).
        n_features: length of the input feature vector.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.
    """
    model = tf.keras.Sequential()
    model.add(layers.Input(shape=(n_features,)))
    model.add(layers.BatchNormalization())
    # Four hidden blocks, each Dense -> BatchNorm -> Dropout.
    for units, drop_rate in ((16, 0.2), (16, 0.2), (24, 0.1), (24, 0.1)):
        model.add(layers.Dense(units, activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.Dropout(drop_rate))
    if task == 'classification':
        model.add(layers.Dense(
            2,
            activation='softmax',
            kernel_regularizer=tf.keras.regularizers.l2(0.01),
            activity_regularizer=tf.keras.regularizers.l1(0.01),
        ))
    elif task == 'regression':
        for units in (16, 16):
            model.add(layers.Dense(units, activation='relu'))
        model.add(layers.Dense(1))
        # Clip the scalar output to [0, 30].
        model.add(layers.ReLU(max_value=30))
    return model
def create_compare_model(task, features_size):
    """Build a small regularized dense model over acoustic feature vectors.

    Args:
        task: 'classification' (2-way softmax head) or 'regression'
            (scalar head clipped to [0, 30]).
        features_size: length of the input feature vector.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.
    """
    def _reg_dense(units, **kwargs):
        # Every Dense layer in this model shares the same L2 kernel
        # penalty and L1 activity penalty.
        return layers.Dense(
            units,
            kernel_regularizer=tf.keras.regularizers.l2(0.01),
            activity_regularizer=tf.keras.regularizers.l1(0.01),
            **kwargs,
        )

    model = tf.keras.Sequential()
    model.add(layers.Input(shape=(features_size,)))
    model.add(_reg_dense(24, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.2))
    if task == 'classification':
        model.add(_reg_dense(2, activation='softmax'))
    elif task == 'regression':
        model.add(_reg_dense(8, activation='relu'))
        model.add(_reg_dense(8, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(_reg_dense(1))
        # Clip the scalar output to [0, 30].
        model.add(layers.ReLU(max_value=30))
    return model
def create_spectogram_model(spectogram_size):
    """Build a CNN binary classifier over spectrogram inputs.

    Args:
        spectogram_size: input shape tuple for the spectrogram tensor
            (excluding the batch dimension).

    Returns:
        An uncompiled functional-API ``Model`` with a 2-way softmax output.
    """
    inputs = layers.Input(shape=spectogram_size, name='spectrogram_input')
    x = layers.BatchNormalization()(inputs)
    # Four conv blocks (Conv2D -> BatchNorm -> MaxPool) with widening
    # filter counts and a larger kernel in the deeper blocks.
    for filters, kernel in ((16, 3), (32, 3), (64, 5), (128, 5)):
        x = layers.Conv2D(filters, kernel_size=(kernel, kernel),
                          strides=(1, 1), activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    outputs = layers.Dense(2, activation='softmax')(x)
    return Model(inputs, outputs)