test.py
import argparse
import logging
import random
import torch
from mmcv.runner import get_dist_info, get_time_str, init_dist
from os import path as osp

from basicsr.data import create_dataloader, create_dataset
from basicsr.models import create_model
from basicsr.utils import (get_env_info, get_root_logger, make_exp_dirs,
                           set_random_seed)
from basicsr.utils.options import dict2str, parse


def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-opt', type=str, required=True, help='Path to option YAML file.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=False)

    # distributed testing settings
    if args.launcher == 'none':  # non-distributed testing
        opt['dist'] = False
        print('Disable distributed testing.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)

    rank, world_size = get_dist_info()
    opt['rank'] = rank
    opt['world_size'] = world_size

    make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"test_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(
        logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))

    # random seed
    seed = opt['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    logger.info(f'Random seed: {seed}')
    set_random_seed(seed + rank)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # create test dataset and dataloader
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(
            test_set,
            dataset_opt,
            num_gpu=opt['num_gpu'],
            dist=opt['dist'],
            sampler=None,
            seed=seed)
        logger.info(
            f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
        test_loaders.append(test_loader)

    # create model
    model = create_model(opt)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info(f'Testing {test_set_name}...')
        model.validation(
            test_loader,
            current_iter=opt['name'],
            tb_logger=None,
            save_img=opt['val']['save_img'])


if __name__ == '__main__':
    main()
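

# Example invocations (sketches only; the option file paths below are
# placeholders and must be replaced with your own test YAML):
#
#   # single-GPU / non-distributed testing
#   python test.py -opt options/test/your_config.yml
#
#   # distributed testing, typically launched via torch.distributed.launch,
#   # which sets the environment variables that init_dist('pytorch') expects
#   python -m torch.distributed.launch --nproc_per_node=4 test.py \
#       -opt options/test/your_config.yml --launcher pytorch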