This repository has been archived by the owner on Oct 25, 2024. It is now read-only.

Commit 12882d8
remove chatbot test dependency on local code (#716)
chensuyue authored Nov 20, 2023
1 parent 674ae4d commit 12882d8
Showing 10 changed files with 22 additions and 24 deletions.
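Every hunk below follows the same pattern: the -v .:/root/chatbot bind mount that exposed the local checkout to the test container is dropped, and commands point instead at the repository copy baked into the image at /intel-extension-for-transformers. A minimal before/after sketch of the pattern (the container name "demo" is illustrative; the install command is taken from the workflows below):

    # before: tests ran against the mounted local checkout
    docker run -tid -v .:/root/chatbot --name=demo chatbotinfer-gha:latest
    docker exec demo bash -c "cd /root/chatbot && python setup.py install"

    # after: no mount; tests use the code already inside the image
    docker run -tid --name=demo chatbotinfer-gha:latest
    docker exec demo bash -c "cd /intel-extension-for-transformers && python setup.py install"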
6 changes: 3 additions & 3 deletions .github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml
@@ -32,13 +32,13 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotfinetune-hpu-s0")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-hpu-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-hpu:latest
+docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-hpu-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-hpu:latest
- name: Run Finetuning
run: |
cmd="python3 /root/chatbot/workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
cmd="python3 /intel-extension-for-transformers/workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
--model_name_or_path mosaicml/mpt-7b-chat \
---train_file /root/chatbot/.github/workflows/sample_data/alpaca_data_sample_45.json \
+--train_file /intel-extension-for-transformers/.github/workflows/sample_data/alpaca_data_sample_45.json \
--bf16 True \
--output_dir ./mpt_peft_finetuned_model \
--num_train_epochs 3 \
10 changes: 5 additions & 5 deletions .github/workflows/chatbot-finetune-mpt-7b-chat.yml
@@ -32,7 +32,7 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotfinetune-mpi-s0")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-numactl --cpunodebind=0 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-mpi:latest
+numactl --cpunodebind=0 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-mpi:latest
master=$(docker inspect -f "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}" "chatbotfinetune-mpi-s0")
echo "master_node=$master" >> $GITHUB_OUTPUT
@@ -41,16 +41,16 @@
run: |
cid=$(docker ps -q --filter "name=chatbotfinetune-mpi-s1")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-numactl --cpunodebind=1 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s1" --hostname="chatbotfinetune-container-mpi-s1" chatbotfinetune-mpi:latest
+numactl --cpunodebind=1 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s1" --hostname="chatbotfinetune-container-mpi-s1" chatbotfinetune-mpi:latest
slave=$(docker inspect -f "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}" "chatbotfinetune-mpi-s1")
echo "slave_node=$slave" >> $GITHUB_OUTPUT
- name: Run Finetuning
run: |
sh .github/workflows/script/chatbot/prepare_ft_mpt-7b-chat_mpi.sh ${{ steps.master_container.outputs.master_node }} ${{ steps.slave_container.outputs.slave_node }}
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
docker exec "chatbotfinetune-mpi-s1" bash -c "cd /root/chatbot && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot; source ./bash_setup.sh; mpirun -f ./hosts2 -n 2 -ppn 1 -genv OMP_NUM_THREADS=48 sh .github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh"
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
docker exec "chatbotfinetune-mpi-s1" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers; source ./bash_setup.sh; mpirun -f ./hosts2 -n 2 -ppn 1 -genv OMP_NUM_THREADS=48 sh .github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh"
- name: Print Logs and Check Finetuning Status
if: success() || failure()
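For reference, the mpirun -f ./hosts2 command in the Run Finetuning step above reads a hosts file listing the machines to launch on — presumably the two container IPs captured by the master_container and slave_container steps, one per line (addresses illustrative):

    172.17.0.2
    172.17.0.3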
6 changes: 3 additions & 3 deletions .github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml
@@ -28,11 +28,11 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
+docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
- name: Run Inference Test
run: |
docker exec "chatbotinfer-gha" bash -c "cd /root/chatbot && source activate && conda activate neuralchat;\
docker exec "chatbotinfer-gha" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat;\
git config --global --add safe.directory '*' && \
git submodule update --init --recursive && \
pip uninstall intel-extension-for-transformers -y; \
@@ -45,7 +45,7 @@
if: always()
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
if [[ ! -z "$cid" ]]; then docker exec "chatbotinfer-gha" bash -c "rm -rf /root/chatbot/* && rm -rf /root/chatbot/.* || echo Clean" && docker stop $cid && docker rm $cid; fi
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
- name: Test Summary
run: echo "Inference completed successfully"
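The cleanup step shrinks because nothing is bind-mounted anymore: with the old -v .:/root/chatbot mount, files the container wrote under /root/chatbot persisted in the runner's workspace and had to be scrubbed before stopping. A two-line illustration of that behavior (the alpine image is used purely for the demo):

    docker run --rm -v "$PWD":/root/chatbot alpine touch /root/chatbot/leftover
    ls leftover   # the file is still on the host after the container exits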
.github/workflows/chatbot-inference-llama-2-7b-chat-hf-hpu.yml
@@ -29,15 +29,15 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-hpu")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
+docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
- name: Run Inference Test without DeepSpeed
run: |
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
- name: Run Inference Test with DeepSpeed
run: |
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; export HABANA_VISIBLE_MODULES=\"0,1\"; python workflows/chatbot/utils/gaudi_spawn.py --use_deepspeed --world_size 2 workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --use_kv_cache --task chat --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; export HABANA_VISIBLE_MODULES=\"0,1\"; python workflows/chatbot/utils/gaudi_spawn.py --use_deepspeed --world_size 2 workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --use_kv_cache --task chat --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
- name: Stop Container
if: success() || failure()
4 changes: 2 additions & 2 deletions .github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml
@@ -30,11 +30,11 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-hpu")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
+docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
- name: Run Inference Test
run: |
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; python workflows/chatbot/inference/generate.py --base_model_path \"mosaicml/mpt-7b-chat\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; python workflows/chatbot/inference/generate.py --base_model_path \"mosaicml/mpt-7b-chat\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
- name: Stop Container
if: success() || failure()
6 changes: 3 additions & 3 deletions .github/workflows/chatbot-inference-mpt-7b-chat.yml
@@ -29,11 +29,11 @@ jobs:
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
+docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
- name: Run Inference Test
run: |
docker exec "chatbotinfer-gha" bash -c "cd /root/chatbot && source activate && conda activate neuralchat; \
docker exec "chatbotinfer-gha" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat; \
git config --global --add safe.directory '*' && \
git submodule update --init --recursive && \
pip uninstall intel-extension-for-transformers -y; \
@@ -46,7 +46,7 @@
if: always()
run: |
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
if [[ ! -z "$cid" ]]; then docker exec "chatbotinfer-gha" bash -c "rm -rf /root/chatbot/* && rm -rf /root/chatbot/.* || echo Clean" && docker stop $cid && docker rm $cid; fi
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
- name: Test Summary
run: echo "Inference completed successfully"
3 changes: 1 addition & 2 deletions .github/workflows/script/chatbot/hpu_check/run_check.sh
@@ -18,6 +18,5 @@ cid=$(docker ps -q --filter "name=$cont_name")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi

# run checks
-script_dir=$(dirname "$0")
-docker run --rm --runtime=habana -v $script_dir:/root/chatbot --name="$cont_name" --hostname="chatbot-hpu-check-container" "$image_name" bash -c "python /root/chatbot/to_hpu.py"
+docker run --rm --runtime=habana --name="$cont_name" --hostname="chatbot-hpu-check-container" "$image_name" bash -c "python /intel-extension-for-transformers/.github/workflows/script/chatbot/hpu_check/to_hpu.py"
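Because the script directory is no longer mounted, this check only works if to_hpu.py was copied into the image at build time. A quick sanity check of that assumption (a sketch, reusing the script's $image_name variable):

    docker run --rm "$image_name" \
      ls /intel-extension-for-transformers/.github/workflows/script/chatbot/hpu_check/to_hpu.py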

1 change: 0 additions & 1 deletion .github/workflows/script/chatbot/hpu_check/to_hpu.py
@@ -1,5 +1,4 @@
import torch
-from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi

ly = torch.nn.Linear(2, 4)
ly.to("hpu")
.github/workflows/script/chatbot/prepare_ft_mpt-7b-chat_mpi.sh
@@ -8,7 +8,7 @@ export I_MPI_HYDRA_IFACE=eth0
EOF
)"
# for launching mpirun from yaml
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot; echo \"source activate && conda activate neuralchat\" > bash_setup.sh; echo export MASTER_ADDR=$master_node >> bash_setup.sh"
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers; echo \"source activate && conda activate neuralchat\" > bash_setup.sh; echo export MASTER_ADDR=$master_node >> bash_setup.sh"
# for ssh setup mpi and oneccl properly
docker exec "chatbotfinetune-mpi-s0" bash -c "echo \"$prepare_script\" >> ~/.bashrc; echo export MASTER_ADDR=$master_node >> ~/.bashrc"

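After this step, the bash_setup.sh generated inside the master container looks roughly like this (the MASTER_ADDR value is whatever IP the master_container step captured; shown illustratively):

    source activate && conda activate neuralchat
    export MASTER_ADDR=172.17.0.2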
.github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh
@@ -1,4 +1,4 @@
-cd /root/chatbot
+cd /intel-extension-for-transformers
hname=$(hostname -s)
python3 workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
--model_name_or_path mosaicml/mpt-7b-chat \
