-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfinal1.sh
33 lines (30 loc) · 6.84 KB
/
final1.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
#!/usr/bin/env bash
# final1.sh — launch a transibr training run.
#
# Usage:    ./final1.sh
# Requires: python3 with PyTorch installed; train.py and
#           configs/transibr_full.txt in the working directory.
set -euo pipefail

# --- Reference launches (previously ~30 duplicated commented lines, collapsed) ---

# Multi-GPU (2x) distributed launch of the view-independent generalization run:
#   CUDA_VISIBLE_DEVICES=0,1 python3 -W ignore -m torch.distributed.launch \
#     --nnodes=1 --node_rank=0 --nproc_per_node=2 --use_env --master_port=1234 \
#     train.py --config configs/transibr_full.txt \
#     --expname transibr_gen_viewindependent_4layer_10srcviews_512rays_192points
# NOTE(review): torch.distributed.launch is deprecated upstream — prefer
# `torchrun --nproc_per_node=2 ...` if this path is revived.

# Single-GPU SAM fine-tune with a 64-dim DINO feature field:
#   CUDA_VISIBLE_DEVICES=0 python3 -W ignore train.py \
#     --config configs/transibr_full.txt \
#     --expname transibr_gnt_sam_ft_viewindependent \
#     --dinofield --dino_dim 64 --folder_name SAM

# --- Active run: single GPU, experiment "gsn" ---
CUDA_VISIBLE_DEVICES=0 python3 -W ignore train.py --config configs/transibr_full.txt --expname gsn