forked from cassanof/finetuning-harness
-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_lora.sh
executable file
·30 lines (30 loc) · 973 Bytes
/
run_lora.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Launch a 6-GPU LoRA fine-tune of bigcode/gpt_bigcode-santacoder on the
# Lua subset of bigcode/starcoderdata (streamed). Expects train.py in the
# current directory; writes checkpoints to ./model_lora.
set -euo pipefail

# DataLoader workers: leave 4 cores for the trainer processes, but never go
# below 1 — $(expr ...) in the original produced 0/negative on small machines.
num_workers=$(( $(nproc --all) - 4 ))
(( num_workers >= 1 )) || num_workers=1

# To pin GPUs, prefix with e.g.: CUDA_VISIBLE_DEVICES=0,1,2,3,4,5
python3 -m torch.distributed.launch \
    --nproc_per_node 6 train.py \
    --model_path="bigcode/gpt_bigcode-santacoder" \
    --model_revision="main" \
    --dataset_name="bigcode/starcoderdata" \
    --subset="lua" \
    --data_column "content" \
    --split="train" \
    --output_dir="./model_lora" \
    --seq_length 2048 \
    --max_steps 16000 \
    --batch_size 16 \
    --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 \
    --num_warmup_steps 100 \
    --eval_freq 500 \
    --save_freq 500 \
    --streaming \
    --log_freq 1 \
    --num_workers="$num_workers" \
    --no_fp16 \
    --bf16 \
    --lora \
    --lora_r 32 \
    --lora_alpha 32 \
    --lora_dropout 0.1
# Optional flags — move above --lora_dropout (with trailing backslashes) to enable:
#   --hub_model_id="TODO" --push_to_hub     # upload checkpoints to the HF Hub
#   --checkpoint "chk/last-checkpoint"      # resume from a saved checkpoint