From db1ebce13b471bedfde8f6920722c1ea31ee319f Mon Sep 17 00:00:00 2001
From: deepanker13
Date: Wed, 20 Dec 2023 10:37:30 +0530
Subject: [PATCH] fixes

---
 sdk/python/kubeflow/trainer/hf_dockerfile      | 7 ++-----
 sdk/python/kubeflow/trainer/hf_llm_training.py | 4 ++--
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/sdk/python/kubeflow/trainer/hf_dockerfile b/sdk/python/kubeflow/trainer/hf_dockerfile
index d03c458238..0853a233ae 100644
--- a/sdk/python/kubeflow/trainer/hf_dockerfile
+++ b/sdk/python/kubeflow/trainer/hf_dockerfile
@@ -7,12 +7,9 @@ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
 
 # Copy the Python package and its source code into the container
 COPY . /app
 
-# Copy the requirements.txt file into the container
-COPY requirements.txt /app/requirements.txt
-
 # Install any needed packages specified in requirements.txt
-RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install --no-cache-dir -r /app/requirements.txt
 
 # Run storage.py when the container launches
-ENTRYPOINT ["python", "hf_llm_training.py"]
+ENTRYPOINT ["python", "/app/hf_llm_training.py"]
\ No newline at end of file
diff --git a/sdk/python/kubeflow/trainer/hf_llm_training.py b/sdk/python/kubeflow/trainer/hf_llm_training.py
index 23ab4bb407..827a1e2ed2 100644
--- a/sdk/python/kubeflow/trainer/hf_llm_training.py
+++ b/sdk/python/kubeflow/trainer/hf_llm_training.py
@@ -81,7 +81,7 @@ def parse_arguments():
     parser.add_argument("--token_dir", help="directory containing tokenizer")
     parser.add_argument("--dataset_dir", help="directory contaning dataset")
     parser.add_argument("--peft_config", help="peft_config")
-    parser.add_argument("--train_params", help="hugging face training parameters")
+    parser.add_argument("--train_parameters", help="hugging face training parameters")
 
     return parser.parse_args()
 
@@ -91,4 +91,4 @@ def parse_arguments():
     model, tokenizer = setup_model_and_tokenizer(args.token_dir, args.model_dir)
     train_data, eval_data = load_and_preprocess_data(args.dataset_dir, tokenizer)
     model = setup_peft_model(model, args.peft_config)
-    train_model(model, train_data, eval_data, tokenizer, args)
+    train_model(model, train_data, eval_data, tokenizer, args.train_parameters)
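
For reference, a minimal sketch (not part of the patch) of how the renamed flag flows through hf_llm_training.py after this change. Only parse_arguments() and the __main__ block appear in the diff, so the helper bodies below are placeholder stubs, and --model_dir is an assumption inferred from the args.model_dir reference in the diff context.

```python
import argparse


# Placeholder stubs: the real helpers are defined earlier in
# hf_llm_training.py and are not part of this patch.
def setup_model_and_tokenizer(token_dir, model_dir):
    return None, None


def load_and_preprocess_data(dataset_dir, tokenizer):
    return None, None


def setup_peft_model(model, peft_config):
    return model


def train_model(model, train_data, eval_data, tokenizer, train_parameters):
    print("training with parameters:", train_parameters)


def parse_arguments():
    parser = argparse.ArgumentParser()
    # --model_dir is assumed here; the remaining flags appear in the diff.
    parser.add_argument("--model_dir", help="directory containing model")
    parser.add_argument("--token_dir", help="directory containing tokenizer")
    parser.add_argument("--dataset_dir", help="directory containing dataset")
    parser.add_argument("--peft_config", help="peft_config")
    # Renamed from --train_params so the CLI flag matches the attribute
    # (args.train_parameters) consumed in the __main__ block below.
    parser.add_argument("--train_parameters", help="hugging face training parameters")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    model, tokenizer = setup_model_and_tokenizer(args.token_dir, args.model_dir)
    train_data, eval_data = load_and_preprocess_data(args.dataset_dir, tokenizer)
    model = setup_peft_model(model, args.peft_config)
    # Pass only the parsed training parameters, not the whole argparse namespace.
    train_model(model, train_data, eval_data, tokenizer, args.train_parameters)
```

A caller launched through the container ENTRYPOINT would then pass something like `--train_parameters '{"learning_rate": 2e-5}'`; the exact serialization expected by train_model is not shown in the diff.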