Add ComfyUI pipeline (#269)
leszko authored Nov 14, 2024
1 parent 7df0b05 commit 4f90470
Showing 7 changed files with 152 additions and 19 deletions.
78 changes: 78 additions & 0 deletions runner/app/live/pipelines/comfyui.py
@@ -0,0 +1,78 @@
import os
import json
import torch
from PIL import Image
import asyncio
import numpy as np

from .interface import Pipeline
from comfystream.client import ComfyStreamClient

COMFY_UI_WORKSPACE_ENV = "COMFY_UI_WORKSPACE"
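# Default workflow in ComfyUI's API (prompt) format:
#   LoadTensor (node "3") -> DepthAnythingTensorrt (node "2") -> SaveTensor (node "1");
#   each ["<node_id>", <output_index>] pair wires an input to another node's output.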
DEFAULT_WORKFLOW_JSON = '''
{
"1": {
"inputs": {
"images": ["2", 0]
},
"class_type": "SaveTensor",
"_meta": {
"title": "SaveTensor"
}
},
"2": {
"inputs": {
"engine": "depth_anything_vitl14-fp16.engine",
"images": ["3", 0]
},
"class_type": "DepthAnythingTensorrt",
"_meta": {
"title": "Depth Anything Tensorrt"
}
},
"3": {
"inputs": {},
"class_type": "LoadTensor",
"_meta": {
"title": "LoadTensor"
}
}
}
'''


class ComfyUI(Pipeline):
def __init__(self, **params):
super().__init__(**params)

comfy_ui_workspace = os.getenv(COMFY_UI_WORKSPACE_ENV)
self.client = ComfyStreamClient(cwd=comfy_ui_workspace)

params = {'prompt': json.loads(DEFAULT_WORKFLOW_JSON)}
self.update_params(**params)

        # ComfyUI caches nodes that only need to run once (e.g. a node that loads model weights).
        # Running the prompt once before real inputs arrive warms up those cached nodes.
warmup_input = torch.randn(1, 512, 512, 3)
asyncio.get_event_loop().run_until_complete(self.client.queue_prompt(warmup_input))

def process_frame(self, image: Image.Image) -> Image.Image:
# Normalize by dividing by 255 to ensure the tensor values are between 0 and 1
image_np = np.array(image.convert("RGB")).astype(np.float32) / 255.0
        # Convert from numpy to torch.Tensor.
        # The tensor starts out with shape HWC, but the workflow expects BHWC;
        # unsqueeze(0) adds a batch dimension of size 1 at the front (a single image).
image_tensor = torch.tensor(image_np).unsqueeze(0)

# Process using ComfyUI pipeline
result_tensor = asyncio.get_event_loop().run_until_complete(self.client.queue_prompt(image_tensor))

# Convert back from Tensor to PIL.Image
result_tensor = result_tensor.squeeze(0)
result_image_np = (result_tensor * 255).byte()
result_image = Image.fromarray(result_image_np.cpu().numpy())
return result_image

def update_params(self, **params):
        # params['prompt'] is the ComfyUI workflow in API (prompt) format (parsed JSON, see DEFAULT_WORKFLOW_JSON)
self.client.set_prompt(params['prompt'])
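The conversions in `process_frame` follow ComfyUI's IMAGE convention: batched HWC float tensors with values in [0, 1]. A self-contained sketch of that round trip (torch/NumPy/PIL only, no ComfyUI required):

```python
import numpy as np
import torch
from PIL import Image

frame = Image.new("RGB", (512, 512), "gray")                            # stand-in for a real video frame
frame_np = np.array(frame.convert("RGB")).astype(np.float32) / 255.0    # HWC, float32 in [0, 1]
frame_tensor = torch.tensor(frame_np).unsqueeze(0)                      # BHWC, batch size 1
assert frame_tensor.shape == (1, 512, 512, 3)

# Reverse direction, as done with the workflow output:
out = frame_tensor.squeeze(0)                                           # drop the batch dimension -> HWC
out_image = Image.fromarray((out * 255).byte().cpu().numpy())           # back to an 8-bit PIL image
assert out_image.size == (512, 512)
```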
3 changes: 3 additions & 0 deletions runner/app/live/pipelines/loader.py
@@ -7,4 +7,7 @@ def load_pipeline(name: str, **params) -> Pipeline:
elif name == "liveportrait":
from .liveportrait import LivePortrait
return LivePortrait(**params)
elif name == "comfyui":
from .comfyui import ComfyUI
return ComfyUI(**params)
raise ValueError(f"Unknown pipeline: {name}")
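A minimal sketch of how the new branch might be exercised end to end, assuming comfystream and a prepared ComfyUI workspace are installed as in the Dockerfile below; the module path is inferred from the file layout and may differ:

```python
import os
os.environ["COMFY_UI_WORKSPACE"] = "/comfyui"          # workspace path used by the Dockerfile below

from PIL import Image
from app.live.pipelines.loader import load_pipeline    # import path assumed from runner/app/live/pipelines/

pipeline = load_pipeline("comfyui")                    # instantiates ComfyUI(), which also runs the warmup prompt
result = pipeline.process_frame(Image.open("frame.png"))
result.save("depth.png")
```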
40 changes: 40 additions & 0 deletions runner/docker/Dockerfile.live-base-comfyui
@@ -0,0 +1,40 @@
ARG BASE_IMAGE=livepeer/ai-runner:live-base
FROM ${BASE_IMAGE}

# Create directory for ComfyUI custom nodes and models
RUN mkdir -p /comfyui/custom_nodes

# Install required Python version
ARG PYTHON_VERSION=3.10
RUN pyenv install $PYTHON_VERSION && \
pyenv global $PYTHON_VERSION && \
pyenv rehash

# Upgrade pip and install required packages
ARG PIP_VERSION=23.3.2
ENV PIP_PREFER_BINARY=1
RUN pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools==69.5.1 wheel==0.43.0

# Install ComfyUI-Depth-Anything-Tensorrt Node (https://github.com/yuvraj108c/ComfyUI-Depth-Anything-Tensorrt)
RUN cd /comfyui/custom_nodes && \
git clone https://github.com/yuvraj108c/ComfyUI-Depth-Anything-Tensorrt.git && \
cd ComfyUI-Depth-Anything-Tensorrt && \
pip install -r requirements.txt

# Upgrade TensorRT to 10.6.0
RUN pip uninstall -y tensorrt && \
pip install tensorrt==10.6.0

RUN pip install torch==2.5.1 torchvision torchaudio tqdm

# Install comfystream (which includes ComfyUI)
RUN pip install git+https://github.com/yondonfu/comfystream.git
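# Clone comfystream separately to install its requirements and copy its tensor_utils custom nodes into the ComfyUI workspace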
RUN git clone https://github.com/yondonfu/comfystream.git && \
cd comfystream && \
pip install -r requirements.txt && \
cp -r nodes/tensor_utils /comfyui/custom_nodes/ && \
cd ..

# Set up ComfyUI workspace
ENV COMFY_UI_WORKSPACE="/comfyui"
RUN ln -s /comfyui/models /models
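A quick, optional sanity check of the resulting base image is to print the pinned TensorRT/torch versions and list the installed custom nodes (a sketch; the image tag follows the README below, and `--entrypoint` is used in case the base image defines one):

```bash
docker build -t livepeer/ai-runner:live-base-comfyui -f docker/Dockerfile.live-base-comfyui .
docker run --rm --entrypoint python livepeer/ai-runner:live-base-comfyui \
  -c "import tensorrt, torch; print(tensorrt.__version__, torch.__version__)"
docker run --rm --entrypoint ls livepeer/ai-runner:live-base-comfyui /comfyui/custom_nodes
```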
3 changes: 3 additions & 0 deletions runner/docker/Dockerfile.live-base-streamdiffusion
@@ -16,7 +16,10 @@ RUN pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools==69.5.1
RUN pip install --no-cache-dir \
torch==2.1.0 \
torchvision==0.16.0 \
diffusers==0.30.0 \
xformers \
protobuf==5.27.2 \
--index-url https://download.pytorch.org/whl/cu121
RUN pip install huggingface-hub==0.23.2

42 changes: 27 additions & 15 deletions runner/docker/README.md
@@ -35,18 +35,30 @@ To build a pipeline-specific container, you need to build the base container fir

This command builds the `segment-anything-2` pipeline-specific container using the Dockerfile located at [docker/Dockerfile.segment_anything_2](docker/Dockerfile.segment_anything_2) and tags it as `livepeer/ai-runner:segment-anything-2`.

### Steps to Build a Realtime Video AI Container

```bash
docker build -t livepeer/ai-runner:live-base . -f docker/Dockerfile.live-base
docker build -t livepeer/ai-runner:live-multimedia -f docker/Dockerfile.live-multimedia .
docker build -t livepeer/ai-runner:live-stream-diffusion -f docker/Dockerfile.live-stream-diffusion .
docker build -t livepeer/ai-runner:live-apps -f docker/Dockerfile.live-apps .
```

Then, you can run and test the Live Container with the following commands:
```bash
docker run -it --rm --name video-to-video -e PIPELINE=live-video-to-video -e MODEL_ID=KBlueLeaf/kohaku-v2.1 --gpus all -p 8000:8000 -v ./models:/models livepeer/ai-runner:live-apps
curl --location -H "Content-Type: application/json" 'http://localhost:8000/live-video-to-video' -X POST -d '{"stream_url":"http://<url-to-trickle-pull>"}'
```
### ComfyStream integration with Depth-Anything

1. Build the Docker images
```bash
export PIPELINE=comfyui
docker build -t livepeer/ai-runner:live-base-${PIPELINE} -f docker/Dockerfile.live-base-${PIPELINE} .
docker build -t livepeer/ai-runner:live-app-${PIPELINE} -f docker/Dockerfile.live-app__PIPELINE__ --build-arg PIPELINE=${PIPELINE} .
```
2. Download the Depth Anything ONNX model
```bash
mkdir models
wget https://huggingface.co/yuvraj108c/Depth-Anything-Onnx/resolve/main/depth_anything_vitl14.onnx -P models
```
3. Build the Depth Anything TensorRT engine
```bash
docker run -it --rm --name video-to-video --gpus all -v ./models:/models livepeer/ai-runner:live-app-comfyui /bin/bash -c "cd /models; python /comfyui/custom_nodes/ComfyUI-Depth-Anything-Tensorrt/export_trt.py"
mkdir -p ./models/tensorrt/depth-anything
mv ./models/*.engine ./models/tensorrt/depth-anything
```
4. Start the Docker container
```bash
docker run -it --rm --name video-to-video --gpus all -p 8000:8000 -v ./models:/models -e PIPELINE=live-video-to-video -e MODEL_ID=comfyui livepeer/ai-runner:live-app-comfyui
```
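5. Test the stream (a sketch mirroring the request shown for the live-apps container above; replace the placeholder trickle URL)
```bash
curl --location -H "Content-Type: application/json" 'http://localhost:8000/live-video-to-video' -X POST -d '{"stream_url":"http://<url-to-trickle-pull>"}'
```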
3 changes: 0 additions & 3 deletions runner/requirements.live-ai.txt
@@ -1,4 +1,3 @@
diffusers==0.30.0
accelerate==0.30.1
transformers==4.43.3
fastapi==0.111.0
@@ -7,7 +6,6 @@ Pillow==10.3.0
python-multipart==0.0.9
uvicorn==0.30.0
huggingface_hub==0.23.2
xformers==0.0.23
triton>=2.1.0
peft==0.11.1
deepcache==0.1.1
@@ -16,7 +14,6 @@ scipy==1.13.0
numpy==1.26.4
av==12.1.0
sentencepiece== 0.2.0
protobuf==5.27.2
bitsandbytes==0.43.3
psutil==6.0.0
pyzmq==26.2.0
2 changes: 1 addition & 1 deletion runner/run-lv2v.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -ex

if [ $# -ne 2 ]; then
if [ $# -lt 2 ]; then
echo "Usage: $0 <input_room> <output_room>"
exit 1
fi