---
# Dataset configuration.
data:
  # Name of dataset: 'SIAR', 'SIAR_SL', 'SIAR_OCC', 'SIAR_OCC_Binary', 'SIAR_OCC_AND_SL', 'SIAR_EVAL'
  dataset: 'SIAR'
  # Split version to use
  split_dir: 'split-1_80_10_10'
  # Number of workers for dataloader
  num_workers: 12
  # Path to dataset directory that includes SIAR data (path/SIAR), generated occlusion masks (path/SIAR_OCC).
  # If false, expects data to be in the data directory of the project.
  # (Canonical lowercase 'false'; PyYAML reads 'False' and 'false' identically.)
  path_to_data: false
  # Path where to save evaluation output (only used by the evaluation script)
  eval_output: 'evaluation_output'
# Training hyperparameters and loop configuration.
train:
  debug: false
  pre_train: false
  batch_size: 16
  # Learning rate. Written as 1.0e-4 (not bare 1e-4): PyYAML's YAML 1.1
  # resolver loads '1e-4' as the string "1e-4", while '1.0e-4' is a float.
  lr: 1.0e-4
  max_epochs: 100
  log_every_n_steps: 15
  log_img_every_n_epochs: 5
  device: 'cuda'
  # Select loss function: 'base_loss', 'reconstruction_loss', 'pre_train_loss', 'separate_head_loss', 'occ_binary_pretraining_loss'
  loss_func: 'separate_head_loss'
  stage: 'train_all'
  # Select metric to use for comparison: 'MSE', 'SSIM', 'MAE', 'BCE', 'BCEWithLogits', 'MAE_weighted'
  metric_all: 'MAE'  # used for reconstruction_loss and separate_head_loss (stage: 'train_all')
  metric_gt: 'MAE'
  metric_sl: 'MAE'  # use when stage: 'train_sl', 'train_all_heads'
  metric_ob: 'MAE'  # use when stage: 'train_ob', 'train_all_heads'
  # Metric for binary occlusion mask pretraining. Set to false when fine-tuning
  # to automatically apply sigmoid to occlusion output.
  metric_occ_mask: false  # 'BCEWithLogits', 'BCE', 'MAE'
  metric_occ_rgb: false  # 'MAE'
  lambda_binary_occ: false  # set to false to automatically apply sigmoid to occlusion output
  lambda_gt_loss: 0.7  # 1.0
  lambda_decomp_loss: 1.0
  # Scientific-notation values use the '1.0e-N' form so they parse as floats
  # under PyYAML (see note on lr above).
  lambda_occlusion_difference: 1.0e-3
  mask_decay: 1.0e-15  # 1.0e-10
  weight_decay: false  # 1.0e-5
  strategy: 'ddp_find_unused_parameters_true'
  accumulate_grad_batches: 4
  es_patience: 10
# Model architecture: shared Swin backbone plus three U-Net decoder heads
# (ground truth, shadow/light, objects).
# NOTE(review): nesting below was reconstructed from a flattened paste —
# verify the deep levels (in particular the placement of
# 'omit_skip_connections' relative to 'layers_no_skip') against the original file.
model:
  # Checkpoint to load full-model weights from; set to false to start from scratch.
  checkpoint: 'checkpoints/final_model/one_for_all/model_oi_sl_occ.ckpt'
  upsampler_gt: 'unet'  # upsampler for ground truth
  upsampler_sl: 'unet'  # upsampler for shadow and light
  upsampler_ob: 'unet'  # upsampler for objects (masks + rgb)
  swin:
    checkpoint: false
    use_checkpoint: false
    frozen_stages: 4
    patch_size: [2, 4, 4]
  unet_gt:
    freeze: false
    checkpoint: false
    decoder:
      output_dim: 3
      f_maps: [96, 192, 384, 768]
      basic_module: 'DoubleConv'
      conv_kernel_size: 3
      conv_padding: 1
      layer_order: 'gcr'
      num_groups: 8
      is3d: true
      layers_no_skip:
        size: [[96, 48, 5, 128, 128], [48, 24, 1, 256, 256]]
        scale_factor: 2
      omit_skip_connections: false
  unet_sl:
    freeze: false
    checkpoint: false
    decoder:
      output_dim: 2
      f_maps: [96, 192, 384, 768]
      basic_module: 'DoubleConv'
      conv_kernel_size: 3
      conv_padding: 1
      layer_order: 'gcr'
      num_groups: 8
      is3d: true
      layers_no_skip:
        size: [[96, 48, 5, 128, 128], [48, 24, 10, 256, 256]]
        scale_factor: 2
      omit_skip_connections: false
  unet_ob:
    freeze: false
    checkpoint: false
    decoder:
      output_dim: 4
      f_maps: [96, 192, 384, 768]
      basic_module: 'DoubleConv'
      conv_kernel_size: 3
      conv_padding: 1
      layer_order: 'gcr'
      num_groups: 8
      is3d: true
      layers_no_skip:
        size: [[96, 48, 5, 128, 128], [48, 24, 10, 256, 256]]
        scale_factor: 2
      omit_skip_connections: false