Readme 30k fix (#65)
* fixes

* fix

* nota bene

* link to supmat places section

* python -> python3 fix

* eval2_gpu fix
cohimame authored and Roman Suvorov committed Dec 10, 2021
1 parent ce609ae commit 0449ee2
Showing 4 changed files with 45 additions and 36 deletions.
25 changes: 16 additions & 9 deletions README.md
@@ -162,6 +162,10 @@ Then download models for _perceptual loss_:
 ## Places
+⚠️ NB: the FID/SSIM/LPIPS metric values for Places reported in the LaMa paper are computed on the 30000 images that we produce in the evaluation section below.
+For more details on the evaluation data, check [[Section 3. Dataset splits in Supplementary](https://ashukha.com/projects/lama_21/lama_supmat_2021.pdf#subsection.3.1)] ⚠️
 
 On the host machine:
 
 # Download data from http://places2.csail.mit.edu/download.html
@@ -170,18 +174,20 @@ On the host machine:
 wget http://data.csail.mit.edu/places/places365/val_large.tar
 wget http://data.csail.mit.edu/places/places365/test_large.tar
 
-# Unpack and etc.
+# Unpack train/test/val data and create .yaml config for it
 bash fetch_data/places_standard_train_prepare.sh
 bash fetch_data/places_standard_test_val_prepare.sh
-bash fetch_data/places_standard_evaluation_prepare_data.sh
 
 # Sample images for test and viz at the end of epoch
 bash fetch_data/places_standard_test_val_sample.sh
 bash fetch_data/places_standard_test_val_gen_masks.sh
 
 # Run training
-python bin/train.py -cn lama-fourier location=places_standard
+# You can change bs with data.batch_size=10
+python3 bin/train.py -cn lama-fourier location=places_standard
 
+# To evaluate trained model and report metrics as in our paper
+# we need to sample previously unseen 30k images and generate masks for them
+bash fetch_data/places_standard_evaluation_prepare_data.sh
 
 # Infer model on thick/thin/medium masks in 256 and 512 and run evaluation
 # like this:
@@ -191,9 +197,10 @@ On the host machine:
     outdir=$(pwd)/inference/random_thick_512 model.checkpoint=last.ckpt
 
 python3 bin/evaluate_predicts.py \
-$(pwd)/configs/eval_2gpu.yaml \
+$(pwd)/configs/eval2_gpu.yaml \
 $(pwd)/places_standard_dataset/evaluation/random_thick_512/ \
-$(pwd)/inference/random_thick_512 $(pwd)/inference/random_thick_512_metrics.csv
+$(pwd)/inference/random_thick_512 \
+$(pwd)/inference/random_thick_512_metrics.csv
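Since the paper's numbers depend on the full 30k evaluation sample (see the NB added above), it is worth confirming that the preparation step actually wrote a 30000-line file before spending GPU time on inference. A minimal check, ours rather than the repo's:

```python
# Sanity check (illustrative, not part of the repo): the reported
# FID/SSIM/LPIPS values assume exactly 30000 sampled evaluation images.
with open('places_standard_dataset/original/eval_random_files.txt') as f:
    paths = [line.strip() for line in f if line.strip()]

assert len(paths) == 30000, f'expected 30000 eval images, found {len(paths)}'
```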
@@ -216,7 +223,7 @@ On the host machine:
 bash fetch_data/celebahq_gen_masks.sh
 
 # Run training
-python bin/train.py -cn lama-fourier-celeba data.batch_size=10
+python3 bin/train.py -cn lama-fourier-celeba data.batch_size=10
 
 # Infer model on thick/thin/medium masks in 256 and run evaluation
 # like this:
@@ -335,7 +342,7 @@ On the host machine:
 # Run training
-python bin/train.py -cn lama-fourier location=my_dataset data.batch_size=10
+python3 bin/train.py -cn lama-fourier location=my_dataset data.batch_size=10
 
 # Evaluation: LaMa training procedure picks best few models according to
 # scores on my_dataset/val/
@@ -353,7 +360,7 @@ On the host machine:
 # metrics calculation:
 python3 bin/evaluate_predicts.py \
-$(pwd)/configs/eval_2gpu.yaml \
+$(pwd)/configs/eval2_gpu.yaml \
 $(pwd)/my_dataset/eval/random_<size>_512/ \
 $(pwd)/inference/my_dataset/random_<size>_512 \
 $(pwd)/inference/my_dataset/random_<size>_512_metrics.csv
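The `random_<size>_512` placeholder above stands for the thick/thin/medium mask variants mentioned earlier in the README. A sketch of driving the metrics step for all three from Python; the positional argument order mirrors the command above, while the loop itself and the use of `subprocess` are our illustration, not part of the repo:

```python
# Illustrative: run evaluate_predicts.py once per mask type, with the same
# positional arguments as above (eval config, evaluation data dir,
# predictions dir, output CSV).
import subprocess
from pathlib import Path

root = Path.cwd()
for size in ['thick', 'thin', 'medium']:
    name = f'random_{size}_512'
    subprocess.run([
        'python3', 'bin/evaluate_predicts.py',
        str(root / 'configs/eval2_gpu.yaml'),
        str(root / f'my_dataset/eval/{name}/'),
        str(root / f'inference/my_dataset/{name}'),
        str(root / f'inference/my_dataset/{name}_metrics.csv'),
    ], check=True)
```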
15 changes: 7 additions & 8 deletions fetch_data/eval_sampler.py
@@ -1,19 +1,18 @@
 import os
 import random
 
 
 val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/'
+list_of_random_val_files = os.path.abspath('.') + '/places_standard_dataset/original/eval_random_files.txt'
 val_files = [val_files_path + image for image in os.listdir(val_files_path)]
 
-print(f'found {len(val_files)} images in {val_files_path}')
+print(f'Sampling 30000 images out of {len(val_files)} images in {val_files_path} ' + \
+      f'and putting their paths into {list_of_random_val_files}')
+print('In our paper we evaluate trained models on these 30k sampled (mask, image) pairs (check Sup. mat.)')
 
 random.shuffle(val_files)
-val_files_random = val_files[0:2000]
-
-list_of_random_val_files = os.path.abspath('.') \
-    + '/places_standard_dataset/original/eval_random_files.txt'
+val_files_random = val_files[0:30000]
 
-print(f'copying 2000 random images to {list_of_random_val_files}')
 with open(list_of_random_val_files, 'w') as fw:
     for filename in val_files_random:
         fw.write(filename+'\n')
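Note that `random.shuffle` is unseeded here, so each run of `eval_sampler.py` selects a different 30k subset, and `os.listdir` order is itself filesystem-dependent. If a reproducible evaluation split is needed, seeding and sorting are small additions; a sketch (the seed value is our arbitrary choice, not the repo's):

```python
import os
import random

random.seed(0)  # arbitrary fixed seed, our addition

val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/'
# Sort first so the shuffle starts from a deterministic order on any filesystem.
val_files = sorted(val_files_path + image for image in os.listdir(val_files_path))

random.shuffle(val_files)  # now deterministic across runs
val_files_random = val_files[0:30000]
```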
2 changes: 1 addition & 1 deletion fetch_data/places_standard_evaluation_prepare_data.sh
@@ -7,7 +7,7 @@ mkdir -p places_standard_dataset/evaluation/random_thick_256/
 mkdir -p places_standard_dataset/evaluation/random_thin_256/
 mkdir -p places_standard_dataset/evaluation/random_medium_256/
 
-# 1. sample 2000 new images
+# 1. sample 30000 new images
 OUT=$(python3 fetch_data/eval_sampler.py)
 echo ${OUT}
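For anyone scripting the preparation from Python instead of bash, the `mkdir -p` calls above translate directly to `os.makedirs` with `exist_ok=True`; a sketch using only the directory names visible in this hunk:

```python
# Python equivalent of the script's `mkdir -p` calls (illustrative).
import os

for name in ['random_thick_256', 'random_thin_256', 'random_medium_256']:
    os.makedirs(f'places_standard_dataset/evaluation/{name}', exist_ok=True)
```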

39 changes: 21 additions & 18 deletions fetch_data/sampler.py
@@ -1,37 +1,40 @@
 import os
 import random
 
 test_files_path = os.path.abspath('.') + '/places_standard_dataset/original/test/'
-test_files = [test_files_path + image for image in os.listdir(test_files_path)]
-print(f'found {len(test_files)} images in {test_files_path}')
+list_of_random_test_files = os.path.abspath('.') + '/places_standard_dataset/original/test_random_files.txt'
 
-random.shuffle(test_files)
-test_files_random = test_files[0:2000]
-#print(test_files_random[0:10])
+test_files = [
+    test_files_path + image for image in os.listdir(test_files_path)
+]
 
-list_of_random_test_files = os.path.abspath('.') \
-    + '/places_standard_dataset/original/test_random_files.txt'
+print(f'Sampling 2000 images out of {len(test_files)} images in {test_files_path} ' + \
+      f'and putting their paths into {list_of_random_test_files}')
+print('Our training procedure will pick the best checkpoints according to metrics computed on these images.')
 
-print(f'copying 100 random images to {list_of_random_test_files}')
+random.shuffle(test_files)
+test_files_random = test_files[0:2000]
 with open(list_of_random_test_files, 'w') as fw:
     for filename in test_files_random:
         fw.write(filename+'\n')
 print('...done')
 
-# ----------------------------------------------------------------------------------
+# --------------------------------
 
 val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/'
-val_files = [val_files_path + image for image in os.listdir(val_files_path)]
-print(f'found {len(val_files)} images in {val_files_path}')
+list_of_random_val_files = os.path.abspath('.') + '/places_standard_dataset/original/val_random_files.txt'
 
-random.shuffle(val_files)
-val_files_random = val_files[0:100]
+val_files = [
+    val_files_path + image for image in os.listdir(val_files_path)
+]
 
-list_of_random_val_files = os.path.abspath('.') \
-    + '/places_standard_dataset/original/val_random_files.txt'
+print(f'Sampling 100 images out of {len(val_files)} in {val_files_path} ' + \
+      f'and putting their paths into {list_of_random_val_files}')
+print('We use these images for a visual check of how the inpainting algorithm evolves from epoch to epoch.')
 
-print(f'copying 100 random images to {list_of_random_val_files}')
+random.shuffle(val_files)
+val_files_random = val_files[0:100]
 with open(list_of_random_val_files, 'w') as fw:
     for filename in val_files_random:
         fw.write(filename+'\n')
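The test and val halves of this script are the same four steps (list, announce, shuffle, write) with different paths and sample sizes. A hypothetical refactor making that symmetry explicit; `sample_and_write`, its signature, and the sorting are ours, not the repo's:

```python
import os
import random


def sample_and_write(src_dir, dst_list, n):
    """Write the paths of n randomly sampled images from src_dir to dst_list."""
    files = sorted(os.path.join(src_dir, image) for image in os.listdir(src_dir))
    random.shuffle(files)
    with open(dst_list, 'w') as fw:
        for filename in files[:n]:
            fw.write(filename + '\n')


root = os.path.abspath('.') + '/places_standard_dataset/original'
sample_and_write(root + '/test/', root + '/test_random_files.txt', 2000)  # checkpoint selection
sample_and_write(root + '/val/', root + '/val_random_files.txt', 100)     # per-epoch visualization
```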