
Commit

CI add inference test for mosaicml-mpt-7b-chat (#157)
Signed-off-by: jiafu zhang <jiafu.zhang@intel.com>
jiafuzha authored Aug 23, 2023
1 parent e95fc32 commit ad4becd
Showing 4 changed files with 58 additions and 10 deletions.
12 changes: 7 additions & 5 deletions .github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml
@@ -3,9 +3,8 @@ name: Chatbot inference on llama-2-7b-chat-hf
on:
  workflow_call:

-# If there is a new commit, the previous jobs will be canceled
concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b
  cancel-in-progress: true

jobs:
@@ -16,18 +15,21 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v2

+      - name: Load environment variables
+        run: cat ~/itrex-actions-runner/.env >> $GITHUB_ENV
+
      - name: Build Docker Image
-        run: docker build ./ --target cpu --build-arg http_proxy="$HTTP_PROXY_IMAGE_BUILD" --build-arg https_proxy="$HTTPS_PROXY_IMAGE_BUILD" -f workflows/chatbot/inference/docker/Dockerfile -t chatbotinfer:latest && yes | docker container prune && yes | docker image prune
+        run: docker build ./ --target cpu --build-arg http_proxy="${{ env.HTTP_PROXY_IMAGE_BUILD }}" --build-arg https_proxy="${{ env.HTTPS_PROXY_IMAGE_BUILD }}" -f workflows/chatbot/inference/docker/Dockerfile -t chatbotinfer:latest && yes | docker container prune && yes | docker image prune

      - name: Start Docker Container
        run: |
          cid=$(docker ps -q --filter "name=chatbotinfer")
          if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
-          docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="$HTTP_PROXY_CONTAINER_RUN" -e https_proxy="$HTTPS_PROXY_CONTAINER_RUN" --name="chatbotinfer" --hostname="chatbotinfer-container" chatbotinfer:latest
+          docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer" --hostname="chatbotinfer-container" chatbotinfer:latest
      - name: Run Inference Test
        run: |
-          docker exec "chatbotinfer" bash -c "cd /root/chatbot && source activate && conda activate chatbot-demo; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"$HF_ACCESS_TOKEN\" --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
+          docker exec "chatbotinfer" bash -c "cd /root/chatbot && source activate && conda activate chatbot-demo; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
      - name: Stop Container
        if: success() || failure()
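Both inference workflows are invoked via workflow_call from the same caller, so ${{ github.workflow }} resolves to the caller's workflow name in each of them; without a distinguishing suffix they would compute identical concurrency groups and one model's test could cancel or queue behind the other. A minimal sketch of the resulting pattern (the suffixes -inf-lla-7b and -inf-mpt-7b are the ones added by this commit; the file comments are illustrative):

# chatbot-inference-llama-2-7b-chat-hf.yml
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b
  cancel-in-progress: true

# chatbot-inference-mpt-7b-chat.yml
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b
  cancel-in-progress: true

With distinct groups, cancel-in-progress only aborts a stale run of the same model's test for the same pull request or branch.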
41 changes: 41 additions & 0 deletions .github/workflows/chatbot-inference-mpt-7b-chat.yml
@@ -0,0 +1,41 @@
+name: Chatbot inference on mosaicml/mpt-7b-chat
+
+on:
+  workflow_call:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b
+  cancel-in-progress: true
+
+jobs:
+  inference:
+    name: inference test
+    runs-on: lms-lab
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Load environment variables
+        run: cat ~/itrex-actions-runner/.env >> $GITHUB_ENV
+
+      - name: Build Docker Image
+        run: docker build ./ --target cpu --build-arg http_proxy="${{ env.HTTP_PROXY_IMAGE_BUILD }}" --build-arg https_proxy="${{ env.HTTPS_PROXY_IMAGE_BUILD }}" -f workflows/chatbot/inference/docker/Dockerfile -t chatbotinfer:latest && yes | docker container prune && yes | docker image prune
+
+      - name: Start Docker Container
+        run: |
+          cid=$(docker ps -q --filter "name=chatbotinfer")
+          if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
+          docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer" --hostname="chatbotinfer-container" chatbotinfer:latest
+      - name: Run Inference Test
+        run: |
+          docker exec "chatbotinfer" bash -c "cd /root/chatbot && source activate && conda activate chatbot-demo; python workflows/chatbot/inference/generate.py --base_model_path \"mosaicml/mpt-7b-chat\" --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
+      - name: Stop Container
+        if: success() || failure()
+        run: |
+          cid=$(docker ps -q --filter "name=chatbotinfer")
+          if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
+      - name: Test Summary
+        run: echo "Inference completed successfully"
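
Both workflows pull proxy settings and the Hugging Face token from a file kept on the self-hosted runner instead of reading them as shell variables of the runner's environment: the first step appends the file's KEY=value lines to $GITHUB_ENV, and later steps read them back through the env context. A minimal sketch of the pattern, assuming ~/itrex-actions-runner/.env holds plain KEY=value pairs (the final step is illustrative only):

    steps:
      # Append runner-local KEY=value pairs to the job-level environment.
      - name: Load environment variables
        run: cat ~/itrex-actions-runner/.env >> $GITHUB_ENV

      # Every later step can read the loaded values via ${{ env.<KEY> }}.
      - name: Show a loaded value
        run: echo "image-build proxy is ${{ env.HTTP_PROXY_IMAGE_BUILD }}"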
5 changes: 5 additions & 0 deletions .github/workflows/chatbot-test.yml
@@ -8,6 +8,7 @@ on:
      - './requirements.txt'
      - '.github/workflows/chatbot-test.yml'
      - '.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml'
+      - '.github/workflows/chatbot-inference-mpt-7b-chat.yml'
      - 'intel_extension_for_transformers/**'
      - 'workflows/chatbot/inference/**'
      - 'workflows/dlsa/**'
@@ -25,3 +26,7 @@ jobs:
  call-inference-llama-2-7b-chat-hf:
    uses: ./.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml

+  call-inference-mpt-7b-chat:
+    uses: ./.github/workflows/chatbot-inference-mpt-7b-chat.yml


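chatbot-test.yml is the caller: each inference workflow opts in with an on: workflow_call: trigger, and the caller runs it through a job-level uses: reference whenever the caller's path filters match. A minimal sketch of the wiring for the new test (only the keys relevant to the caller/callee relationship are shown):

# Callee: .github/workflows/chatbot-inference-mpt-7b-chat.yml
name: Chatbot inference on mosaicml/mpt-7b-chat
on:
  workflow_call:

# Caller: .github/workflows/chatbot-test.yml
jobs:
  call-inference-mpt-7b-chat:
    uses: ./.github/workflows/chatbot-inference-mpt-7b-chat.yml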
10 changes: 5 additions & 5 deletions workflows/chatbot/inference/generate.py
@@ -370,12 +370,12 @@ def load_model(
        tokenizer_name,
        use_fast=False if (re.search("llama", model_name, re.IGNORECASE)
            or re.search("neural-chat-7b-v2", model_name, re.IGNORECASE)) else True,
-        token=hf_access_token,
+        use_auth_token=hf_access_token,
    )
    if re.search("flan-t5", model_name, re.IGNORECASE):
        with smart_context_manager(use_deepspeed=use_deepspeed):
            model = AutoModelForSeq2SeqLM.from_pretrained(
-                model_name, low_cpu_mem_usage=True, token=hf_access_token
+                model_name, low_cpu_mem_usage=True, use_auth_token=hf_access_token
            )
    elif (re.search("mpt", model_name, re.IGNORECASE)
          or re.search("neural-chat-7b-v1", model_name, re.IGNORECASE)):
@@ -388,7 +388,7 @@ def load_model(
                torch_dtype=torch.bfloat16,
                low_cpu_mem_usage=True,
                torchscript=cpu_jit,
-                token=hf_access_token,
+                use_auth_token=hf_access_token,
            )
    elif (
        re.search("gpt", model_name, re.IGNORECASE)
@@ -399,7 +399,7 @@
    ):
        with smart_context_manager(use_deepspeed=use_deepspeed):
            model = AutoModelForCausalLM.from_pretrained(
-                model_name, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, token=hf_access_token
+                model_name, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, use_auth_token=hf_access_token
            )
    else:
        raise ValueError(
@@ -477,7 +477,7 @@ def load_model(
        from models.mpt.mpt_trace import jit_trace_mpt_7b, MPTTSModelForCausalLM

        model = jit_trace_mpt_7b(model)
-        config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, token=hf_access_token)
+        config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, use_auth_token=hf_access_token)
        model = MPTTSModelForCausalLM(
            model, config, use_cache=use_cache, model_dtype=torch.bfloat16
        )

