convolution_visualization2.py
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 15:03:19 2019
@author: Zhouxin
"""
import cv2
import matplotlib.gridspec as gridspec
#%reload_ext autoreload
#%autoreload 2
#%matplotlib inline
from fastai.vision import *
from fastai.callbacks.hooks import *
import scipy.ndimage
import gc
from PIL import Image, ImageDraw

seed = 42
np.random.seed(seed)
# data = Image.open("")  # image path was left blank in the original; `data` is not used below
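
# SaveFeatures, np2tensor and tensor2np are used below but are not defined in this
# file (they presumably live in a companion module or notebook). The minimal
# sketches that follow are assumptions about their interfaces, not the original
# implementations: SaveFeatures stores a layer's output via a forward hook,
# np2tensor turns an HWC image in [0, 1] into an ImageNet-normalized 1xCxHxW
# tensor, and tensor2np reverses that. The ImageNet mean/std values are the
# standard ones.
import numpy as np
import torch

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

class SaveFeatures():
    """Keep a reference to a layer's most recent output via a forward hook."""
    def __init__(self, module):
        self.hook = module.register_forward_hook(self.hook_fn)
    def hook_fn(self, module, input, output):
        self.features = output
    def close(self):
        self.hook.remove()

def np2tensor(img, dtype=np.float32):
    """HWC numpy image in [0, 1] -> ImageNet-normalized 1xCxHxW float tensor."""
    img = (img.astype(dtype) - IMAGENET_MEAN) / IMAGENET_STD
    arr = np.ascontiguousarray(img.transpose(2, 0, 1)[None])
    return torch.from_numpy(arr)

def tensor2np(t):
    """ImageNet-normalized 1xCxHxW tensor -> HWC numpy image (roughly in [0, 1])."""
    img = t.detach().cpu().numpy()[0].transpose(1, 2, 0)
    return img * IMAGENET_STD + IMAGENET_MEAN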
class FilterVisualizer():
    # def __init__(self, model):
    #     self.model = model
    def __init__(self, size=56, upscaling_steps=12, upscaling_factor=1.2):
        self.size, self.upscaling_steps, self.upscaling_factor = size, upscaling_steps, upscaling_factor
        # vgg16(pre=True) and set_trainable are not defined in this file; they are
        # expected from the surrounding (older) fastai environment.
        self.model = vgg16(pre=True).cuda().eval()
        set_trainable(self.model, False)

    def visualize(self, sz, layer, filter, upscaling_steps=12, upscaling_factor=1.2,
                  lr=0.1, opt_steps=20, blur=None, print_losses=False):
        img = (np.random.random((sz, sz, 3)) * 20 + 128.) / 255  # values between 0 and 1
        activations = SaveFeatures(layer)  # register a forward hook on the target layer
        for i in range(upscaling_steps):
            # convert np image to tensor (channel first, batch axis) and apply ImageNet norm
            img_tensor = np2tensor(img, np.float32)
            img_tensor = img_tensor.cuda()
            img_tensor.requires_grad_()
            if img_tensor.grad is not None:
                img_tensor.grad.zero_()
            optimizer = torch.optim.Adam([img_tensor], lr=lr, weight_decay=1e-6)
            # use more optimization steps once the image has been upscaled a few times
            if i > upscaling_steps / 2:
                opt_steps_ = int(opt_steps * 1.3)
            else:
                opt_steps_ = opt_steps
            for n in range(opt_steps_):  # optimize pixel values for opt_steps_ iterations
                optimizer.zero_grad()
                _ = self.model(img_tensor)
                loss = -1 * activations.features[0, filter].mean()
                if print_losses:
                    if i % 3 == 0 and n % 5 == 0:
                        print(f'{i} - {n} - {float(loss)}')
                loss.backward()
                optimizer.step()
            # convert tensor back to a numpy image
            img = tensor2np(img_tensor)
            self.output = img
            sz = int(upscaling_factor * sz)  # calculate the new image size
            # print(f'Upscale img to: {sz}')
            img = cv2.resize(img, (sz, sz), interpolation=cv2.INTER_CUBIC)  # scale image up
            if blur is not None:
                img = cv2.blur(img, (blur, blur))  # blur to reduce high-frequency patterns
        activations.close()  # remove the hook
        return np.clip(self.output, 0, 1)
fv = FilterVisualizer()
m = fv.model
print(m[0][4])  # inspect one of the early layers of the pretrained network
#layer = 40
#filter = 265
#FV = FilterVisualizer(size=56, upscaling_steps=12, upscaling_factor=1.2)
#FV.visualize(56, layer, filter, blur=5)
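
# Hedged usage sketch: the layer/filter indices below mirror the commented-out
# example above (layer 40, filter 265) and assume that m[0] indexes the VGG
# feature block, as the print statement above suggests. The indices and the
# plotting code are illustrative, not the author's exact workflow.
import matplotlib.pyplot as plt

layer = m[0][40]                                # a late convolutional layer (assumed index)
result = fv.visualize(56, layer, 265, blur=5)   # image that maximally activates filter 265
plt.imshow(result)
plt.axis('off')
plt.show()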