From 93341b7584ac8d585169c482a446646fbdab1833 Mon Sep 17 00:00:00 2001
From: shruti2522
Date: Mon, 22 Apr 2024 12:14:58 +0530
Subject: [PATCH] updated examples for train API

Signed-off-by: shruti2522
---
 examples/pytorch/language-modeling/train_api_hf_dataset.ipynb | 4 ++--
 examples/pytorch/language-modeling/train_api_s3_dataset.ipynb | 4 ++--
 examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/pytorch/language-modeling/train_api_hf_dataset.ipynb b/examples/pytorch/language-modeling/train_api_hf_dataset.ipynb
index f284804e02..2b97218718 100644
--- a/examples/pytorch/language-modeling/train_api_hf_dataset.ipynb
+++ b/examples/pytorch/language-modeling/train_api_hf_dataset.ipynb
@@ -21,7 +21,7 @@
     "from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
     "from kubeflow.storage_initializer.hugging_face import (\n",
     "    HuggingFaceModelParams,\n",
-    "    HuggingFaceTrainParams,\n",
+    "    HuggingFaceTrainerParams,\n",
     "    HfDatasetParams,\n",
     ")\n",
     "from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -71,7 +71,7 @@
     "    # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
     "    # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
     "    dataset_provider_parameters=HfDatasetParams(repo_id=\"imdatta0/ultrachat_1k\"),\n",
-    "    train_parameters=HuggingFaceTrainParams(\n",
+    "    trainer_parameters=HuggingFaceTrainerParams(\n",
     "        lora_config=LoraConfig(\n",
     "            r=8,\n",
     "            lora_alpha=8,\n",
diff --git a/examples/pytorch/language-modeling/train_api_s3_dataset.ipynb b/examples/pytorch/language-modeling/train_api_s3_dataset.ipynb
index 19038dfa1e..d9d8d2a842 100644
--- a/examples/pytorch/language-modeling/train_api_s3_dataset.ipynb
+++ b/examples/pytorch/language-modeling/train_api_s3_dataset.ipynb
@@ -20,7 +20,7 @@
     "from kubeflow.training.api.training_client import TrainingClient\n",
     "from kubeflow.storage_initializer.hugging_face import (\n",
     "    HuggingFaceModelParams,\n",
-    "    HuggingFaceTrainParams,\n",
+    "    HuggingFaceTrainerParams,\n",
     "    HfDatasetParams,\n",
     ")\n",
     "from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -90,7 +90,7 @@
     "            \"secret_key\": s3_secret_key,\n",
     "        }\n",
     "    ),\n",
-    "    train_parameters=HuggingFaceTrainParams(\n",
+    "    trainer_parameters=HuggingFaceTrainerParams(\n",
     "        lora_config=LoraConfig(\n",
     "            r=8,\n",
     "            lora_alpha=8,\n",
diff --git a/examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb b/examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb
index 58778727c4..e28975a6f1 100644
--- a/examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb
+++ b/examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb
@@ -613,7 +613,7 @@
     "from kubeflow.training import TrainingClient\n",
     "from kubeflow.storage_initializer.hugging_face import (\n",
     "    HuggingFaceModelParams,\n",
-    "    HuggingFaceTrainParams,\n",
+    "    HuggingFaceTrainerParams,\n",
     "    HfDatasetParams,\n",
     ")\n",
     "\n",
@@ -651,7 +651,7 @@
     "        split=\"train[:3000]\",\n",
     "    ),\n",
     "    # Specify HuggingFace Trainer parameters. In this example, we will skip evaluation and model checkpoints.\n",
-    "    train_parameters=HuggingFaceTrainParams(\n",
+    "    trainer_parameters=HuggingFaceTrainerParams(\n",
     "        training_parameters=transformers.TrainingArguments(\n",
     "            output_dir=\"test_trainer\",\n",
     "            save_strategy=\"no\",\n",
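
Usage sketch (illustrative, not part of the patch): after this rename, the notebooks
pass trainer_parameters=HuggingFaceTrainerParams(...) to TrainingClient.train().
The sketch below assumes the train() signature these notebooks already use
(model_provider_parameters, dataset_provider_parameters, trainer_parameters);
the job name, model URI, and transformer type are hypothetical placeholders,
not values taken from this diff.

    # Minimal sketch of the renamed API under the assumptions above.
    import transformers
    from peft import LoraConfig

    from kubeflow.training import TrainingClient
    from kubeflow.storage_initializer.hugging_face import (
        HuggingFaceModelParams,
        HuggingFaceTrainerParams,  # renamed from HuggingFaceTrainParams
        HfDatasetParams,
    )

    client = TrainingClient()
    client.train(
        name="llm-fine-tune",  # hypothetical job name
        num_workers=1,
        model_provider_parameters=HuggingFaceModelParams(
            model_uri="hf://distilbert/distilgpt2",  # illustrative model
            transformer_type=transformers.AutoModelForCausalLM,
        ),
        dataset_provider_parameters=HfDatasetParams(repo_id="imdatta0/ultrachat_1k"),
        # Renamed keyword and class: train_parameters -> trainer_parameters,
        # HuggingFaceTrainParams -> HuggingFaceTrainerParams.
        trainer_parameters=HuggingFaceTrainerParams(
            lora_config=LoraConfig(r=8, lora_alpha=8),
            training_parameters=transformers.TrainingArguments(
                output_dir="test_trainer",
                save_strategy="no",
            ),
        ),
    )

The change is purely mechanical: only the keyword argument and the imported class
name change, so existing model, dataset, LoRA, and TrainingArguments values carry
over unmodified.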