# ibp.py
# reference from https://github.com/tuomaso/radial_rl/blob/master/DQN/ibp.py
import torch
import torch.nn as nn
import torch.nn.functional as F


def initial_bounds(x0, epsilon):
    '''
    Build the initial l-inf interval around the input.
    x0: input, b x c x h x w
    epsilon: float, radius of the l-inf ball around x0
    '''
    upper = x0 + epsilon
    lower = x0 - epsilon
    return upper, lower


def weighted_bound(layer, prev_upper, prev_lower):
    '''
    Propagate interval bounds through an affine layer (nn.Linear or nn.Conv2d).
    '''
    prev_mu = (prev_upper + prev_lower) / 2   # interval centre
    prev_r = (prev_upper - prev_lower) / 2    # interval radius
    # scale factor for NTK parameterization (sqrt of the input's last dimension);
    # the radius is scaled by the same factor so the bounds stay consistent with mu
    scale = float(prev_mu.size(-1)) ** 0.5
    mu = layer(prev_mu) / scale
    if type(layer) == nn.Linear:
        r = F.linear(prev_r, torch.abs(layer.weight)) / scale
    elif type(layer) == nn.Conv2d:
        r = F.conv2d(prev_r, torch.abs(layer.weight), stride=layer.stride,
                     padding=layer.padding, dilation=layer.dilation,
                     groups=layer.groups) / scale
    else:
        raise ValueError('weighted_bound only supports nn.Linear and nn.Conv2d')
    upper = mu + r
    lower = mu - r
    return upper, lower


def activation_bound(layer, prev_upper, prev_lower):
    # monotone (or shape-only) layers: applying them endpoint-wise preserves valid bounds
    upper = layer(prev_upper)
    lower = layer(prev_lower)
    return upper, lower


def network_bounds(model, x0, epsilon):
    '''
    Get interval bound propagation (IBP) upper and lower bounds for the
    activations of a model.
    model: an nn.Sequential module
    x0: input, b x input_shape
    epsilon: float, the l-inf distance bound the interval is computed over
    '''
    upper, lower = initial_bounds(x0, epsilon)
    for layer in model.modules():
        if type(layer) in (nn.Sequential,):
            # container module, nothing to propagate
            pass
        elif type(layer) in (nn.ReLU, nn.Sigmoid, nn.Tanh, nn.MaxPool2d, nn.Flatten):
            upper, lower = activation_bound(layer, upper, lower)
        elif type(layer) in (nn.Linear, nn.Conv2d):
            upper, lower = weighted_bound(layer, upper, lower)
        else:
            print('Unsupported layer:', type(layer))
    return upper, lower
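

if __name__ == '__main__':
    # Illustrative sanity check (a minimal sketch added here for documentation,
    # not part of the referenced repository): build a small MLP, compute IBP
    # bounds around a random input, and verify that the bounds contain the
    # forward pass for a random point inside the epsilon-ball. The forward pass
    # mirrors the NTK scaling assumed in weighted_bound (each affine output is
    # divided by the square root of its input dimension).
    torch.manual_seed(0)
    model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
    x0 = torch.randn(2, 8)
    epsilon = 0.1
    upper, lower = network_bounds(model, x0, epsilon)

    def scaled_forward(x):
        # apply the same per-layer scaling as weighted_bound
        for layer in model:
            if type(layer) in (nn.Linear, nn.Conv2d):
                x = layer(x) / (float(x.size(-1)) ** 0.5)
            else:
                x = layer(x)
        return x

    # random perturbation inside the l-inf ball of radius epsilon
    x = x0 + epsilon * (2 * torch.rand_like(x0) - 1)
    out = scaled_forward(x)
    assert (lower - 1e-5 <= out).all() and (out <= upper + 1e-5).all()
    print('IBP bounds hold for a sampled point:', bool((lower <= upper).all()))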