models.py
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def fan_in_uniform_init(tensor, fan_in=None):
    """Utility function for initializing actor and critic"""
    if fan_in is None:
        fan_in = tensor.size(-1)
    w = 1. / np.sqrt(fan_in)
    nn.init.uniform_(tensor, -w, w)

class Actor(nn.Module):
    def __init__(self, state_space, action_space):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_space, 128)
        self.fc2 = nn.Linear(128, 300)
        self.fc3 = nn.Linear(300, 400)
        self.fc4 = nn.Linear(400, action_space)

        # Weight init: fan-in scaled uniform for the hidden layers,
        # small uniform range for the output layer.
        fan_in_uniform_init(self.fc1.weight)
        fan_in_uniform_init(self.fc1.bias)
        fan_in_uniform_init(self.fc2.weight)
        fan_in_uniform_init(self.fc2.bias)
        fan_in_uniform_init(self.fc3.weight)
        fan_in_uniform_init(self.fc3.bias)
        nn.init.uniform_(self.fc4.weight, -3e-3, 3e-3)
        nn.init.uniform_(self.fc4.bias, -3e-3, 3e-3)

        # Batch normalization after each hidden layer.
        self.b1 = nn.BatchNorm1d(128)
        self.b2 = nn.BatchNorm1d(300)
        self.b3 = nn.BatchNorm1d(400)

    def forward(self, x):
        x = self.b1(F.relu(self.fc1(x)))
        x = self.b2(F.relu(self.fc2(x)))
        x = self.b3(F.relu(self.fc3(x)))
        # tanh bounds each action component to [-1, 1].
        return torch.tanh(self.fc4(x))
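

# --- Usage sketch (illustrative, not from the original repository) ----------
# A minimal, hedged example of querying the actor for a single observation.
# The sizes (state_space=3, action_space=1) are assumptions for illustration.
# Because the actor uses BatchNorm1d, inputs need a batch dimension, and
# eval() must be used when only one sample is passed (BatchNorm1d cannot
# compute batch statistics from a single sample in training mode).
#
#     actor = Actor(state_space=3, action_space=1)  # hypothetical sizes
#     actor.eval()                                  # use running BatchNorm stats
#     state = torch.randn(1, 3)                     # shape: (batch, state_space)
#     with torch.no_grad():
#         action = actor(state)                     # bounded to [-1, 1] by tanh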

class Critic(nn.Module):
    def __init__(self, state_space, action_space):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_space, 128)
        self.fcA1 = nn.Linear(action_space, 256)
        self.fcS1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, 300)
        self.fc3 = nn.Linear(300, 400)
        self.fc4 = nn.Linear(400, 1)

        # Weight init: fan-in scaled uniform for the hidden layers,
        # small uniform range for the output layer.
        fan_in_uniform_init(self.fc1.weight)
        fan_in_uniform_init(self.fc1.bias)
        fan_in_uniform_init(self.fcA1.weight)
        fan_in_uniform_init(self.fcA1.bias)
        fan_in_uniform_init(self.fcS1.weight)
        fan_in_uniform_init(self.fcS1.bias)
        fan_in_uniform_init(self.fc2.weight)
        fan_in_uniform_init(self.fc2.bias)
        fan_in_uniform_init(self.fc3.weight)
        fan_in_uniform_init(self.fc3.bias)
        nn.init.uniform_(self.fc4.weight, -3e-3, 3e-3)
        nn.init.uniform_(self.fc4.bias, -3e-3, 3e-3)

        self.b1 = nn.BatchNorm1d(128)
        self.b2 = nn.BatchNorm1d(256)

    def forward(self, state, action):
        # State pathway: linear -> ReLU -> batch norm.
        x = self.b1(F.relu(self.fc1(state)))
        # Action pathway projected to the same width as the state pathway.
        aOut = self.fcA1(F.relu(action))
        sOut = self.b2(F.relu(self.fcS1(x)))
        # Merge both pathways and regress a scalar Q-value.
        comb = F.relu(aOut + sOut)
        out = F.relu(self.fc2(comb))
        out = F.relu(self.fc3(out))
        out = self.fc4(out)
        return out
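

# --- Smoke test (illustrative sketch, not part of the original file) --------
# A minimal check that instantiates both networks with assumed dimensions
# (state_dim=3, action_dim=1) and verifies the output shapes on a random
# batch. All names and sizes below the guard are hypothetical examples.
if __name__ == "__main__":
    state_dim, action_dim, batch = 3, 1, 8        # assumed sizes, not from the repo
    actor = Actor(state_dim, action_dim)
    critic = Critic(state_dim, action_dim)
    states = torch.randn(batch, state_dim)
    actions = actor(states)                       # (batch, action_dim), in [-1, 1]
    q_values = critic(states, actions)            # (batch, 1) Q-value estimates
    print(actions.shape, q_values.shape)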