eval_all.py
import torch
import pickle
import configparser
from utils.pycocoeval import *
from torchvision import transforms
from utils.data_loader import get_loader
from utils.build_vocab import Vocabulary  # needed so pickle can resolve the Vocabulary class
from model_script.model import EncoderCNN, DecoderRNN

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def main():
    # Read evaluation settings from the [EVAL] section of config.ini
    config = configparser.ConfigParser()
    config.read('config.ini')
    params = config['EVAL']
    encoder_path = params['encoder_path']
    decoder_path = params['decoder_path']
    crop_size = int(params['crop_size'])
    vocab_path = params['vocab_path']
    image_dir = params['image_dir']
    caption_path = params['caption_path']
    embed_size = int(params['embed_size'])
    hidden_size = int(params['hidden_size'])
    num_layers = int(params['num_layers'])
    batch_size = int(params['batch_size'])
    num_workers = int(params['num_workers'])
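
    # For reference, a minimal sketch of the [EVAL] section this script
    # expects in config.ini; the values below are illustrative assumptions,
    # not the repository's actual settings:
    #
    #   [EVAL]
    #   encoder_path = models/encoder.ckpt
    #   decoder_path = models/decoder.ckpt
    #   crop_size    = 224
    #   vocab_path   = data/vocab.pkl
    #   image_dir    = data/val_images
    #   caption_path = data/annotations/captions_val.json
    #   embed_size   = 256
    #   hidden_size  = 512
    #   num_layers   = 1
    #   batch_size   = 1
    #   num_workers  = 2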
    # Image preprocessing: deterministic transforms for evaluation
    # (random cropping/flipping would make the metrics non-reproducible)
    transform = transforms.Compose([
        transforms.Resize(229),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])
    # Load vocabulary wrapper
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build models in eval mode (batchnorm uses moving mean/variance)
    encoder = EncoderCNN(embed_size).eval()
    decoder = DecoderRNN(embed_size, hidden_size, len(vocab), num_layers).eval()
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Load the trained model parameters (map_location allows CPU-only evaluation)
    encoder.load_state_dict(torch.load(encoder_path, map_location=device))
    decoder.load_state_dict(torch.load(decoder_path, map_location=device))
    # No shuffling needed for evaluation
    data_loader = get_loader(image_dir, caption_path, vocab, transform,
                             batch_size, False, num_workers)
    def id_to_word(si):
        """Convert a sequence of vocabulary ids to words, stopping at '<end>'."""
        s = []
        for word_id in si:
            word = vocab.idx2word[word_id]
            s.append(word)
            if word == '<end>':
                break
        return s
    for i, (images, captions, lengths) in enumerate(data_loader):
        # Generate a caption from the image
        images = images.to(device)
        with torch.no_grad():
            feature = encoder(images)
            sampled_ids = decoder.sample(feature)
        sampled_ids = sampled_ids[0].cpu().numpy()  # (1, max_seq_length) -> (max_seq_length)
        captions = captions.detach().cpu().numpy()
        references = []
        for cap in captions:
            references.append(id_to_word(cap))
        gen_cap = id_to_word(sampled_ids)

        # Score only the first image of the batch against its reference caption
        rng = range(1)
        res = {0: [{'image_id': 0, 'caption': ' '.join(gen_cap)}]}
        gts = {0: [{'image_id': 0, 'caption': ' '.join(references[0])}]}
        evalObj = COCOEvalCap(rng, gts, res)  # generated captions vs. ground truth
        evalObj.evaluate()
        print(evalObj.eval)

if __name__ == '__main__':
    main()
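
With the [EVAL] section of config.ini filled in (see the sketched example above), the script runs without arguments:

    python eval_all.py

and prints, for each batch, the metric dictionary built by utils.pycocoeval (typically BLEU, METEOR, ROUGE_L, and CIDEr, depending on which scorers that module enables).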