From 05e8301e4593e2a67b4bae24f093dd12ce5cc7c2 Mon Sep 17 00:00:00 2001
From: Clark Saben <76020733+csaben@users.noreply.github.com>
Date: Sun, 19 Nov 2023 11:56:38 -0500
Subject: [PATCH] finetune : add --n-gpu-layers flag info to --help (#4128)

---
 examples/finetune/finetune.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index af46e44a6e216..e991e37ef39f6 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1288,6 +1288,7 @@ static void train_print_usage(int argc, char ** argv, const struct train_params
     fprintf(stderr, "  --model-base FNAME         model path from which to load base model (default '%s')\n", params->fn_model_base);
     fprintf(stderr, "  --lora-out FNAME           path to save llama lora (default '%s')\n", params->fn_lora_out);
     fprintf(stderr, "  --only-write-lora          only save llama lora, don't do any training. use this if you only want to convert a checkpoint to a lora adapter.\n");
+    fprintf(stderr, "  --n-gpu-layers N           Number of model layers to offload to GPU (default 0).\n");
     fprintf(stderr, "  --norm-rms-eps F           RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
     fprintf(stderr, "  --rope-freq-base F         Frequency base for ROPE (default %f)\n", params->rope_freq_base);
     fprintf(stderr, "  --rope-freq-scale F        Frequency scale for ROPE (default %f)\n", params->rope_freq_scale);