-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtrain.py
92 lines (76 loc) · 3.59 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import torch
import torch.nn as nn
import numpy as np
import random
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix
from dataloader import DatasetLoader
from model_arch import Model
from config import *
def _train_one_epoch(model, loader, loss_func, optimizer, epoch):
    '''
    Run one training epoch over `loader`, logging the loss every 300 iterations.

    Side effects: updates `model` weights in place and appends to
    logs_save_loc/train_loss.txt (paths come from the config wildcard import).
    '''
    model.train()
    for i, (image, target) in enumerate(tqdm(loader.load_dataset())):
        image = image.cuda()
        output = model(image)
        optimizer.zero_grad()
        # BCELoss expects probabilities; squeeze drops the trailing channel dim.
        loss = loss_func(output.squeeze(), target.float().cuda())
        loss.backward()
        optimizer.step()
        if i % 300 == 0:
            print("### Training Loss: ", loss.item())
            # 'with' closes the file automatically — no explicit close() needed.
            with open(os.path.join(logs_save_loc, 'train_loss.txt'), 'a') as f:
                f.write('Training Loss at Epoch {} Iteration {}: {} \n'.format(epoch, i, loss.item()))


def _evaluate(model, loader, epoch, threshold):
    '''
    Evaluate `model` on `loader`: binarize sigmoid outputs at `threshold`,
    then compute and log accuracy, APCER, BPCER and ACER for this epoch.
    '''
    model.eval()
    actual_target = []
    predicted_target = []
    # no_grad: evaluation needs no autograd graph — saves memory and time.
    with torch.no_grad():
        for image, target in tqdm(loader.load_dataset()):
            image = image.cuda()
            output = model(image)
            # Hard decision: scores below the threshold -> 0 (attack), else 1.
            output = torch.where(output < threshold, torch.zeros_like(output), torch.ones_like(output))
            actual_target.extend(target.float().cpu().tolist())
            predicted_target.extend(output.squeeze().cpu().tolist())
    acc = accuracy_score(actual_target, predicted_target, normalize=True)
    tn, fp, fn, tp = confusion_matrix(actual_target, predicted_target, labels=[0, 1]).ravel()
    # NOTE(review): if the test split is single-class, a denominator below is 0
    # and numpy yields nan/inf (with a warning) rather than raising — verify
    # the test set always contains both labels.
    apcer = fp / (tn + fp)
    bpcer = fn / (fn + tp)
    acer = (apcer + bpcer) / 2
    print("Accuracy: %.4f, TN: %i, FP: %i, FN: %i, TP: %i, APCER: %.4f, BPCER: %.4f, ACER: %.4f" % (acc, tn, fp, fn, tp, apcer, bpcer, acer))
    with open(os.path.join(logs_save_loc, 'eval_accuracy.txt'), 'a') as f:
        f.write("Epoch : %i \n Accuracy: %.4f, TN: %i, FP: %i, FN: %i, TP: %i, APCER: %.4f, BPCER: %.4f, ACER: %.4f \n" % (epoch, acc, tn, fp, fn, tp, apcer, bpcer, acer))


def train(threshold=0.15):
    '''
    Wrapper method for training and per-epoch evaluation of the model.

    Parameters
    ----------
    threshold : float, optional
        Decision threshold used to binarize model outputs during evaluation
        (default 0.15, matching the original hard-coded value).

    All other settings (dataset paths, batch_size, learning_rate, seed_value,
    epoch_num, save locations, resume state) come from the `config` module's
    wildcard import at the top of this file.
    '''
    # Seed everything FIRST so weight initialization and dataset shuffling
    # are reproducible (the original seeded after building loaders/model,
    # which left those steps unseeded).
    torch.manual_seed(seed_value)
    np.random.seed(seed_value)
    random.seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    print("### Seed selection done")
    # Dataset Loaders
    train_loader = DatasetLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    print("### Training Dataset loaded from ", train_dataset)
    test_loader = DatasetLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)
    print("### Testing Dataset loaded from ", test_dataset)
    # Model Initialization (optionally resuming from a saved checkpoint)
    model = Model()
    print("### Model Initialized")
    if model_last_state_epoch != 0:
        assert model_last_state != '', "Model last state must be given"
        model.load_state_dict(torch.load(model_last_state))
        print("### Model Loaded from epoch ", model_last_state_epoch)
    model.cuda()
    # Loss Function Initialization — BCELoss assumes the model emits sigmoid
    # probabilities in [0, 1]; TODO confirm against model_arch.Model.
    loss_func = nn.BCELoss()
    print("### Loss Function Initialized")
    # Optimizer Initialization
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    print("### Optimizer initialized")
    # Training loop: one epoch of SGD, checkpoint, then full evaluation.
    print("### Starting Training ...")
    for epoch in tqdm(range(model_last_state_epoch, epoch_num)):
        _train_one_epoch(model, train_loader, loss_func, optimizer, epoch)
        torch.save(model.state_dict(), os.path.join(model_save_loc, 'model_{}.pth'.format(epoch)))
        print("############################ Evaluation #############################")
        _evaluate(model, test_loader, epoch, threshold)