# passgpt-16chars.yaml
# Execution-wide parameters
config_args:
  seed: 14
  maxchars: 16 # Maximum number of characters considered in your passwords
  subsample: -1 # -1 means no subsampling of the training data
  tokenizer_path: '' # Introduce the path or Hugging Face name of your tokenizer
  train_data_path: '' # Path to your training data
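# Note (an assumption about the loader, inferred from the section comments):
# unlike model_args and training_args below, these keys are consumed by the
# PassGPT training script itself rather than passed to a transformers class.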
# Details for model architecture. Set parameters directly for GPT2Config (https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.GPT2Config)
model_args:
  n_head: 12
  n_layer: 8
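# Any GPT2Config field not set here (e.g. n_embd, vocab_size) keeps its
# transformers default, assuming the script expands this mapping as
# GPT2Config(**model_args) -- an assumption about the loader, not stated here.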
# Set parameters directly for TrainingArguments (https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments)
training_args:
  per_device_train_batch_size: 2048
  gradient_accumulation_steps: 1
  logging_steps: 250
  save_total_limit: 1
  num_train_epochs: 3
  overwrite_output_dir: true
  fp16: false
  output_dir: '' # Where to store your checkpoints
  report_to: "wandb"
  save_steps: 50000
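
# Usage sketch (an assumption about the consumer, not part of this config):
# a loader would typically read this file with PyYAML and fan the sections out
# to the corresponding Hugging Face classes. The filename is this file's own;
# the variable names are illustrative.
#
#   import yaml
#   from transformers import GPT2Config, TrainingArguments
#
#   with open("passgpt-16chars.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   model_config = GPT2Config(**cfg["model_args"])    # n_head=12, n_layer=8
#   training_args = TrainingArguments(**cfg["training_args"])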