From a0cf9925880620000aa2d1948d61bf659ddfdfaa Mon Sep 17 00:00:00 2001
From: ZhengPeng7
Date: Sat, 11 May 2024 04:18:16 +0000
Subject: [PATCH] Add a switch in shell scripts to run the process adaptively.
 COD/HRSOD need a lower LR.

---
 README.md |  1 +
 config.py | 32 ++++++++++++++++++++------------
 test.sh   | 35 ++++++++++++++++-------------------
 train.sh  | 13 +++++++++----
 4 files changed, 46 insertions(+), 35 deletions(-)

diff --git a/README.md b/README.md
index 36d4729..38332b0 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,7 @@ Our BiRefNet has achieved SOTA on many similar HR tasks:
 
 + **Inference and evaluation** of your given weights: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1MaEiBfJ4xIaZZn0DqKrhydHB8X97hNXl#scrollTo=DJ4meUYjia6S)
 + **Online Inference with GUI** with adjustable resolutions: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/ZhengPeng7/BiRefNet_demo)
++ Online **Single Image Inference** on Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14Dqg7oeBkFEtchaHLNpig2BcdkZEogba?usp=drive_link)
 
 ## Third-Party Creations
 
diff --git a/config.py b/config.py
index 60fc168..432abc5 100644
--- a/config.py
+++ b/config.py
@@ -1,6 +1,5 @@
 import os
 import math
-import torch
 
 
 class Config():
@@ -13,7 +12,7 @@ def __init__(self) -> None:
         self.training_set = {
             'DIS5K': ['DIS-TR', 'DIS-TR+DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4'][0],
             'COD': 'TR-COD10K+TR-CAMO',
-            'HRSOD': ['TR-DUTS', 'TR-DUTS+TR-HRSOD', 'TR-DUTS+TR-UHRSD', 'TR-HRSOD+TR-UHRSD', 'TR-DUTS+TR-HRSOD+TR-UHRSD'][3],
+            'HRSOD': ['TR-DUTS', 'TR-HRSOD', 'TR-UHRSD', 'TR-DUTS+TR-HRSOD', 'TR-DUTS+TR-UHRSD', 'TR-HRSOD+TR-UHRSD', 'TR-DUTS+TR-HRSOD+TR-UHRSD'][5],
             'DIS5K+HRSOD+HRS10K': 'DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4+DIS-TR+TE-HRS10K+TE-HRSOD+TE-UHRSD+TR-HRS10K+TR-HRSOD+TR-UHRSD',  # leave DIS-VD for evaluation.
             'P3M-10k': 'TR-P3M-10k',
         }[self.task]
@@ -39,14 +38,14 @@ def __init__(self) -> None:
         self.IoU_finetune_last_epochs = [
             0,
             {
-                'DIS5K': -100,
-                'COD': -30,
-                'HRSOD': -30,
-                'DIS5K+HRSOD+HRS10K': -50,
-                'P3M-10k': -30,
+                'DIS5K': -50,
+                'COD': -20,
+                'HRSOD': -20,
+                'DIS5K+HRSOD+HRS10K': -20,
+                'P3M-10k': -20,
             }[self.task]
         ][1]    # choose 0 to skip
-        self.lr = 1e-4 * math.sqrt(self.batch_size / 4)  # adapt the lr linearly
+        self.lr = (1e-4 if 'DIS5K' in self.task else 1e-5) * math.sqrt(self.batch_size / 4)  # DIS needs a higher LR to converge faster. Scale the LR with sqrt(batch_size / 4).
         self.size = 1024
         self.num_workers = max(4, self.batch_size)  # will be decreased to min(it, batch_size) at the initialization of the data_loader
@@ -76,7 +75,7 @@ def __init__(self) -> None:
         self.progressive_ref = self.refine and True
         self.ender = self.progressive_ref and False
         self.scale = self.progressive_ref and 2
-        self.auxiliary_classification = False
+        self.auxiliary_classification = False  # Only for DIS5K, where class labels are saved in `dataset.py`.
         self.refine_iteration = 1
         self.freeze_bb = False
         self.model = [
@@ -131,13 +130,22 @@ def __init__(self) -> None:
         self.SDPA_enabled = False    # Bugs. Slower, and errors occur in multi-GPU runs.
 
         # others
-        self.device = [0, 'cpu'][0 if torch.cuda.is_available() else 1]     # .to(0) == .to('cuda:0')
+        self.device = [0, 'cpu'][0]     # .to(0) == .to('cuda:0')
 
         self.batch_size_valid = 1
         self.rand_seed = 7
         run_sh_file = [f for f in os.listdir('.') if 'train.sh' == f] + [os.path.join('..', f) for f in os.listdir('..') if 'train.sh' == f]
         with open(run_sh_file[0], 'r') as f:
             lines = f.readlines()
-        self.save_last = int([l.strip() for l in lines if 'val_last=' in l][0].split('=')[-1])
-        self.save_step = int([l.strip() for l in lines if 'step=' in l][0].split('=')[-1])
+        self.save_last = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'val_last=' in l][0].split('val_last=')[-1].split()[0])
+        self.save_step = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'step=' in l][0].split('step=')[-1].split()[0])
         self.val_step = [0, self.save_step][0]
+
+    def print_task(self) -> None:
+        # Print the task name so shell scripts can choose the matching settings.
+        print(self.task)
+
+if __name__ == '__main__':
+    config = Config()
+    config.print_task()
+
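Note on the LR change above: the base rate is now keyed off the task (1e-4 for DIS5K-style tasks, 1e-5 for COD/HRSOD/P3M-10k) and then scaled by the square root of the batch size relative to a reference batch size of 4. A minimal sketch of the resulting values; the batch sizes below are illustrative and not part of this patch:

    import math

    def birefnet_lr(task: str, batch_size: int) -> float:
        # Base LR: high for DIS5K-style tasks, 10x lower for COD/HRSOD/P3M-10k.
        base_lr = 1e-4 if 'DIS5K' in task else 1e-5
        # sqrt scaling against the reference batch size of 4.
        return base_lr * math.sqrt(batch_size / 4)

    print(birefnet_lr('DIS5K', 4))  # 1e-04
    print(birefnet_lr('COD', 16))   # 2e-05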
diff --git a/test.sh b/test.sh
index d8ea2e9..3ea04a5 100755
--- a/test.sh
+++ b/test.sh
@@ -8,24 +8,21 @@ CUDA_VISIBLE_DEVICES=${devices} python inference.py --pred_root ${pred_root}
 
 echo Inference finished at $(date)
 
 # Evaluation
-log_dir=e_logs
-mkdir ${log_dir}
-
-testsets=DIS-VD && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-testsets=DIS-TE1 && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-testsets=DIS-TE2 && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-testsets=DIS-TE3 && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-testsets=DIS-TE4 && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-
-# testsets=CHAMELEON && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=NC4K && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=TE-CAMO && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=TE-COD10K && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-
-# testsets=DAVIS-S && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=TE-HRSOD && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=TE-UHRSD && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=DUT-OMRON && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
-# testsets=TE-DUTS && nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testsets} > ${log_dir}/eval_${testsets}.out 2>&1 &
+log_dir=e_logs && mkdir -p ${log_dir}
+
+task=$(python3 config.py)
+case "${task}" in
+    "DIS5K") testsets='DIS-VD,DIS-TE1,DIS-TE2,DIS-TE3,DIS-TE4' ;;
+    "COD") testsets='CHAMELEON,NC4K,TE-CAMO,TE-COD10K' ;;
+    "HRSOD") testsets='DAVIS-S,TE-HRSOD,TE-UHRSD,DUT-OMRON,TE-DUTS' ;;
+    "DIS5K+HRSOD+HRS10K") testsets='DIS-VD' ;;
+    "P3M-10k") testsets='TE-P3M-500-P,TE-P3M-500-NP' ;;
+esac
+testsets=$(echo ${testsets} | tr ',' ' ')
+
+for testset in ${testsets}; do
+    nohup python eval_existingOnes.py --pred_root ${pred_root} --data_lst ${testset} > ${log_dir}/eval_${testset}.out 2>&1 &
+done
+
 echo Evaluation started at $(date)
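Note on the round trip above: config.py now prints the task name, the shell scripts switch on it, and config.py in turn reads val_last/step back out of the task's case arm in train.sh. A minimal sketch of that extraction, run against a hypothetical case line in the new format:

    # A per-task case arm as train.sh now carries it (sample input for illustration).
    line = '    "COD") epochs=150 && val_last=50 && step=5 ;;'
    task = 'COD'

    # Same logic as the new save_last/save_step lines in config.py:
    # match the task's case arm, then take the first token after each key.
    assert '"{}")'.format(task) in line
    val_last = int(line.split('val_last=')[-1].split()[0])
    step = int(line.split('step=')[-1].split()[0])
    print(val_last, step)  # 50 5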
diff --git a/train.sh b/train.sh
index 8a83976..cbc10b6 100755
--- a/train.sh
+++ b/train.sh
@@ -1,10 +1,15 @@
 #!/bin/bash
 # Run script
-# DIS/COD/HRSOD/massive/P3M-10k: epochs,val_last,step:[600,200,10]/[150,50,10]/[150,50,10]/[300,100,10]/[150,50,10]
+# Per-task settings for training & test.
 method="$1"
+task=$(python3 config.py)
+case "${task}" in
+    "DIS5K") epochs=600 && val_last=200 && step=10 ;;
+    "COD") epochs=150 && val_last=50 && step=5 ;;
+    "HRSOD") epochs=150 && val_last=50 && step=5 ;;
+    "DIS5K+HRSOD+HRS10K") epochs=300 && val_last=50 && step=5 ;;
+    "P3M-10k") epochs=150 && val_last=50 && step=5 ;;
+esac
 
 testsets=NO  # Non-existing folder to skip.
 # testsets=TE-COD10K  # for COD
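A closing note on the smaller IoU_finetune_last_epochs values: the negative numbers select a trailing window of epochs, so with the new COD/HRSOD settings (epochs=150, -20) the IoU finetuning covers the last 20 epochs. The sketch below assumes the training loop enables the IoU loss once epoch > epochs + IoU_finetune_last_epochs; that condition is an assumption about the consumer and is not shown in this patch:

    # Hypothetical consumer of IoU_finetune_last_epochs (assumed condition).
    epochs = 150                    # COD/HRSOD, per the new train.sh
    iou_finetune_last_epochs = -20  # COD/HRSOD, per the new config.py
    finetune = [e for e in range(1, epochs + 1) if e > epochs + iou_finetune_last_epochs]
    print(finetune[0], finetune[-1], len(finetune))  # 131 150 20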