model.py
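"""Conditional GAN model definition (TensorFlow 1.x, graph mode)."""
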
import tensorflow as tf
from discriminator import Discriminator
from generator import Generator


class ConditionalGAN:
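    """Generates BS antenna covariance matrices conditioned on the omni-received signal."""
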
    def __init__(self,
                 batch_size=256,
                 num_ant=32,
                 layers=2,
                 learning_rate=2e-4,
                 omni_size=512,
                 z_size=100
                 ):
        """
        Args:
            batch_size: integer, batch size
            num_ant: integer, number of antennas at BS
            layers: integer; layers=2 feeds the real and imaginary parts together as two channels
            learning_rate: float, initial learning rate for Adam
            omni_size: integer, length of the omni-received signal
            z_size: integer, length of the random input vector of GAN
        """
        self._batch_size = batch_size
        self._num_ant = num_ant
        self._omni_size = omni_size
        self._learning_rate = learning_rate
        self._z_size = z_size
        self._layers = layers

        self.is_training = tf.placeholder_with_default(True, shape=[], name='is_training')
        self.G = Generator('G', self.is_training, num_ant=num_ant, layers=layers)
        self.D = Discriminator('D', self.is_training, num_ant=num_ant, layers=layers)

        # Random noise input to G.
        self.z = tf.placeholder(tf.float32, shape=(None, 1, 1, self._z_size))
        # Omni-received signal that conditions G.
        self.y = tf.placeholder(tf.float32, shape=(None, 1, 1, self._omni_size))
        # Covariance matrices from the dataset; the last axis stacks the real and imaginary parts.
        self.x = tf.placeholder(tf.float32, shape=(None, self._num_ant, self._num_ant, self._layers))
        # Reshaped and duplicated omni-received signals that condition D:
        # y1 corresponds to x, y2 does not (see discriminator_loss).
        self.y1 = tf.placeholder(tf.float32, shape=(None, 4, 4, self._omni_size))
        self.y2 = tf.placeholder(tf.float32, shape=(None, 4, 4, self._omni_size))

    def model(self):
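        """Builds the losses and summaries; returns (G_loss, D_loss, fake_x, mse)."""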
        fake_x = self.G(self.z, self.y)
        G_loss = self.generator_loss(self.D, self.x, fake_x, self.y1)
        D_loss = self.discriminator_loss(self.D, self.x, fake_x, self.y1, self.y2)
        mse = tf.losses.mean_squared_error(labels=self.x, predictions=fake_x)

        # summary
        tf.summary.histogram('D/true', self.D(self.x, self.y1))
        tf.summary.histogram('D/fake', self.D(fake_x, self.y1))
        tf.summary.scalar('loss/G', G_loss)
        tf.summary.scalar('loss/D', D_loss)

        return G_loss, D_loss, fake_x, mse

    def _make_optimizer(self, loss, variables, name):
        """Adam optimizer (beta1=0.5) at the learning rate set in __init__."""
        global_step = tf.Variable(0, trainable=False)
        tf.summary.scalar('learning_rate/{}'.format(name), self._learning_rate)
        return (
            tf.train.AdamOptimizer(self._learning_rate, beta1=0.5, name=name)
            .minimize(loss, global_step=global_step, var_list=variables)
        )

    def G_optimize(self, G_loss):
        """Returns an op that applies one Adam update to G's variables."""
        G_optimizer = self._make_optimizer(G_loss, self.G.variables, name='Adam_G')
        with tf.control_dependencies([G_optimizer]):
            return tf.no_op(name='G_optimizer')

    def D_optimize(self, D_loss):
        """Returns an op that applies one Adam update to D's variables."""
        D_optimizer = self._make_optimizer(D_loss, self.D.variables, name='Adam_D')
        with tf.control_dependencies([D_optimizer]):
            return tf.no_op(name='D_optimizer')

    def discriminator_loss(self, D, x, fake_x, y1, y2):
        """
        Args:
            D: discriminator object
            x: the covariance matrices from the dataset
            fake_x: the covariance matrices generated by G
            y1: reshaped and duplicated omni-received signal that corresponds to x
            y2: reshaped and duplicated omni-received signal that does not correspond to x
        Returns:
            loss: scalar
        """
        D_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D(x, y1), labels=tf.ones([self._batch_size, 1, 1, 1])))
        D_loss_fake1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D(fake_x, y1), labels=tf.zeros([self._batch_size, 1, 1, 1])))
        # Mismatched (x, y2) pairs are also labeled fake, so D must learn the
        # correspondence between x and its conditioning signal, not just realism.
        D_loss_fake2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D(x, y2), labels=tf.zeros([self._batch_size, 1, 1, 1])))
        # Average the two fake terms so the real and fake parts are weighted equally.
        D_loss = D_loss_real + (D_loss_fake1 + D_loss_fake2) / 2
        return D_loss

    def generator_loss(self, D, x, fake_x, y1):
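        """
        Args:
            D: discriminator object
            x: the covariance matrices from the dataset
            fake_x: the covariance matrices generated by G
            y1: reshaped and duplicated omni-received signal that corresponds to x
        Returns:
            loss: scalar, adversarial loss on (fake_x, y1) plus an MSE penalty toward x
        """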
        G_loss_gan = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D(fake_x, y1), labels=tf.ones([self._batch_size, 1, 1, 1])))
        mse = tf.losses.mean_squared_error(labels=x, predictions=fake_x)
        # The factor of 10 weights the adversarial term against the MSE reconstruction term.
        G_loss = 10 * G_loss_gan + mse
        return G_loss
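

if __name__ == '__main__':
    # Minimal smoke-test sketch: builds the graph and runs one D update and one
    # G update on random inputs shaped like the placeholders above. The random
    # batches are stand-ins for the repo's real data pipeline, and this block is
    # an assumed usage pattern rather than part of the original training code.
    import numpy as np

    batch_size = 256
    gan = ConditionalGAN(batch_size=batch_size)
    G_loss, D_loss, fake_x, mse = gan.model()
    G_step = gan.G_optimize(G_loss)
    D_step = gan.D_optimize(D_loss)

    feed = {
        gan.z: np.random.randn(batch_size, 1, 1, 100).astype(np.float32),
        gan.y: np.random.randn(batch_size, 1, 1, 512).astype(np.float32),
        gan.x: np.random.randn(batch_size, 32, 32, 2).astype(np.float32),
        gan.y1: np.random.randn(batch_size, 4, 4, 512).astype(np.float32),
        gan.y2: np.random.randn(batch_size, 4, 4, 512).astype(np.float32),
    }
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(D_step, feed_dict=feed)
        sess.run(G_step, feed_dict=feed)
        print('G/D/mse:', sess.run([G_loss, D_loss, mse], feed_dict=feed))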