From cb9f788a9a92409212d37b12fc09c716949030e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Leszko?=
Date: Fri, 10 Jan 2025 08:45:58 +0100
Subject: [PATCH] Remove liveportrait (#393)

---
 .../ai-runner-live-pipelines-docker.yaml      |  11 +-
 runner/app/live/pipelines/liveportrait.py     | 114 ------------------
 runner/app/live/pipelines/loader.py           |   3 -
 runner/app/live/streamer/streamer.py          |   6 +-
 runner/dl_checkpoints.sh                      |  24 +---
 .../docker/Dockerfile.live-base-liveportrait  |  67 ----------
 runner/requirements-liveportrait.txt          |  13 --
 worker/docker.go                              |   1 -
 8 files changed, 4 insertions(+), 235 deletions(-)
 delete mode 100644 runner/app/live/pipelines/liveportrait.py
 delete mode 100644 runner/docker/Dockerfile.live-base-liveportrait
 delete mode 100644 runner/requirements-liveportrait.txt

diff --git a/.github/workflows/ai-runner-live-pipelines-docker.yaml b/.github/workflows/ai-runner-live-pipelines-docker.yaml
index 84048a32..c989339d 100644
--- a/.github/workflows/ai-runner-live-pipelines-docker.yaml
+++ b/.github/workflows/ai-runner-live-pipelines-docker.yaml
@@ -83,7 +83,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        pipeline: [streamdiffusion, comfyui, liveportrait]
+        pipeline: [streamdiffusion, comfyui]
     steps:
       - name: Check out code
         uses: actions/checkout@v4.1.1
@@ -98,9 +98,6 @@ jobs:
           files_yaml: |
             base_dockerfile:
               - runner/docker/Dockerfile.live-base-${{ matrix.pipeline }}
-            liveportrait:
-              - runner/images/**
-              - runner/requirements-liveportrait.txt
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -120,11 +117,7 @@ jobs:
           github.event_name == 'pull_request' &&
           github.event.pull_request.head.repo.full_name == github.repository &&
           (
-            steps.changed-files.outputs.base_dockerfile_any_changed == 'true' ||
-            (
-              matrix.pipeline == 'liveportrait' &&
-              steps.changed-files.outputs.liveportrait_any_changed == 'true'
-            )
+            steps.changed-files.outputs.base_dockerfile_any_changed == 'true'
           )
         )
         with:
diff --git a/runner/app/live/pipelines/liveportrait.py b/runner/app/live/pipelines/liveportrait.py
deleted file mode 100644
index 6b175220..00000000
--- a/runner/app/live/pipelines/liveportrait.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from PIL import Image
-from pydantic import BaseModel
-import hashlib
-
-from omegaconf import OmegaConf
-import cv2
-import numpy as np
-
-
-import sys
-import os
-import logging
-# FasterLivePotrait modules imports files from the root of the project, so we need to monkey patch the sys path
-base_flip_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "FasterLivePortrait"))
-sys.path.append(base_flip_dir)
-from FasterLivePortrait.src.pipelines.faster_live_portrait_pipeline import FasterLivePortraitPipeline
-
-from .interface import Pipeline
-
-
-def make_flip_path(rel_path):
-    return os.path.normpath(os.path.join(base_flip_dir, rel_path))
-
-# Subset of configs from the trt_infer.yaml#infer_params
-class LivePortraitInferParams(BaseModel):
-    class Config:
-        extra = 'forbid'
-
-    crop_driving_video: bool = False
-    normalize_lip: bool = True
-    source_video_eye_retargeting: bool = False
-    video_editing_head_rotation: bool = False
-    eye_retargeting: bool = False
-    lip_retargeting: bool = False
-    stitching: bool = True
-    relative_motion: bool = True
-    pasteback: bool = True
-    do_crop: bool = True
-    do_rot: bool = True
-
-    lip_normalize_threshold: float = 0.03
-    source_video_eye_retargeting_threshold: float = 0.18
-    driving_smooth_observation_variance: float = 1e-7
-    driving_multiplier: float = 1.0
-
-    def to_omegaconf(self) -> OmegaConf:
-        is_flag = lambda field: field not in ['lip_normalize_threshold', 'source_video_eye_retargeting_threshold', 'driving_smooth_observation_variance', 'driving_multiplier']
-        params = {
-            f'{"flag_" if is_flag(field) else ""}{field}': getattr(self, field)
-            for field in self.__dict__
-        }
-        return OmegaConf.create(params)
-
-class LivePortraitParams(BaseModel):
-    class Config:
-        extra = 'forbid'
-
-    src_image: str = 'flame-smile'
-    animal: bool = False
-    infer_params: LivePortraitInferParams = LivePortraitInferParams()
-
-base_pipe_config_path = make_flip_path('configs/trt_infer.yaml')
-
-class LivePortrait(Pipeline):
-    def __init__(self, **params):
-        super().__init__(**params)
-        self.pipe = None
-        self.update_params(**params)
-
-    def process_frame(self, image: Image.Image) -> Image.Image:
-        cv2_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-        _, out_crop, _ = self.pipe.run(cv2_image, self.pipe.src_imgs[0], self.pipe.src_infos[0], first_frame=self.first_frame)
-        self.first_frame = False
-
-        if out_crop is None:
-            logging.info(f"No face in driving frame")
-            return image
-
-        return Image.fromarray(out_crop)
-
-    def update_params(self, **params):
-        new_params = LivePortraitParams(**params)
-        if not os.path.isabs(new_params.src_image):
-            new_params.src_image = make_flip_path(f"assets/examples/source/{new_params.src_image}.jpg")
-
-        logging.info(f"liveportrait new params: {new_params}")
-
-        new_cfg = OmegaConf.load(base_pipe_config_path)
-        new_cfg.infer_params = OmegaConf.merge(new_cfg.infer_params, new_params.infer_params.to_omegaconf())
-        new_cfg.infer_params.mask_crop_path = make_flip_path(new_cfg.infer_params.mask_crop_path)
-        for model_name in new_cfg.models:
-            model_params = new_cfg.models[model_name]
-            if isinstance(model_params.model_path, str):
-                model_params.model_path = make_flip_path(model_params.model_path)
-            else:
-                model_params.model_path = [make_flip_path(path) for path in model_params.model_path]
-
-        config_hash = hashlib.md5(str(new_cfg).encode()).hexdigest()
-        logging.info(f"liveportrait new config hash: {config_hash}")
-
-        new_pipe = FasterLivePortraitPipeline(cfg=new_cfg, is_animal=new_params.animal)
-
-        prepared_src = new_pipe.prepare_source(new_params.src_image)
-        if not prepared_src:
-            raise ValueError(f"no face in {new_params.src_image}!")
-
-        if self.pipe is not None:
-            self.pipe.clean_models()
-
-        self.params = new_params
-        self.cfg = new_cfg
-        self.pipe = new_pipe
-        self.first_frame = True
-        self.prepared_src = prepared_src
diff --git a/runner/app/live/pipelines/loader.py b/runner/app/live/pipelines/loader.py
index bab137be..2862e4e1 100644
--- a/runner/app/live/pipelines/loader.py
+++ b/runner/app/live/pipelines/loader.py
@@ -4,9 +4,6 @@ def load_pipeline(name: str, **params) -> Pipeline:
     if name == "streamdiffusion":
         from .streamdiffusion import StreamDiffusion
         return StreamDiffusion(**params)
-    elif name == "liveportrait":
-        from .liveportrait import LivePortrait
-        return LivePortrait(**params)
     elif name == "comfyui":
         from .comfyui import ComfyUI
         return ComfyUI(**params)
diff --git a/runner/app/live/streamer/streamer.py b/runner/app/live/streamer/streamer.py
index c9ace3f8..01a39e77 100644
--- a/runner/app/live/streamer/streamer.py
+++ b/runner/app/live/streamer/streamer.py
@@ -242,11 +242,7 @@ async def monitor_loop(self):
                 continue
 
             active_after_reload = time_since_last_output < (time_since_reload - 1)
-            stopped_recently = (
-                time_since_last_output > 8
-                if self.pipeline == "liveportrait" # liveportrait loads very quick but gets stuck too often
-                else active_after_reload and time_since_last_output > 5 and time_since_last_output < 60
-            )
+            stopped_recently = active_after_reload and time_since_last_output > 5 and time_since_last_output < 60
             if stopped_recently or gone_stale:
                 logging.warning(
                     "No output received while inputs are being sent. Restarting process."
diff --git a/runner/dl_checkpoints.sh b/runner/dl_checkpoints.sh
index 6ebb88ef..ebab1fe9 100755
--- a/runner/dl_checkpoints.sh
+++ b/runner/dl_checkpoints.sh
@@ -87,7 +87,6 @@ function download_all_models() {
 function download_live_models() {
     huggingface-cli download KBlueLeaf/kohaku-v2.1 --include "*.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models
     huggingface-cli download stabilityai/sd-turbo --include "*.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models
-    huggingface-cli download warmshao/FasterLivePortrait --local-dir models/FasterLivePortrait--checkpoints
     huggingface-cli download yuvraj108c/Depth-Anything-Onnx --include depth_anything_vitl14.onnx --local-dir models/ComfyUI--models/Depth-Anything-Onnx
     download_sam2_checkpoints
     download_florence2_checkpoints
@@ -145,27 +144,6 @@ function build_tensorrt_models() {
         done
     done"
 
-    # FasterLivePortrait
-    AI_RUNNER_LIVEPORTRAIT_IMAGE=${AI_RUNNER_LIVEPORTRAIT_IMAGE:-livepeer/ai-runner:live-app-liveportrait}
-    docker pull $AI_RUNNER_LIVEPORTRAIT_IMAGE
-    # ai-worker has tags hardcoded in `var livePipelineToImage` so we need to use the same tag in here:
-    docker image tag $AI_RUNNER_LIVEPORTRAIT_IMAGE livepeer/ai-runner:live-app-liveportrait
-    docker run --rm -v ./models:/models --gpus all -l TensorRT-engines \
-        $AI_RUNNER_LIVEPORTRAIT_IMAGE \
-        bash -c "cd /app/app/live/FasterLivePortrait && \
-            if [ ! -f '/models/FasterLivePortrait--checkpoints/liveportrait_onnx/stitching_lip.trt' ]; then
-                echo 'Building TensorRT engines for LivePortrait models (regular)...'
-                sh scripts/all_onnx2trt.sh
-            else
-                echo 'Regular LivePortrait TensorRT engines already exist, skipping build'
-            fi && \
-            if [ ! -f '/models/FasterLivePortrait--checkpoints/liveportrait_animal_onnx/stitching_lip.trt' ]; then
-                echo 'Building TensorRT engines for LivePortrait models (animal)...'
-                sh scripts/all_onnx2trt_animal.sh
-            else
-                echo 'Animal LivePortrait TensorRT engines already exist, skipping build'
-            fi"
-
     # ComfyUI (only DepthAnything for now)
     AI_RUNNER_COMFYUI_IMAGE=${AI_RUNNER_COMFYUI_IMAGE:-livepeer/ai-runner:live-app-comfyui}
     docker pull $AI_RUNNER_COMFYUI_IMAGE
@@ -242,7 +220,7 @@ echo "Starting livepeer AI subnet model downloader..."
 echo "Creating 'models' directory in the current working directory..."
 mkdir -p models
 mkdir -p models/checkpoints
-mkdir -p models/StreamDiffusion--engines models/FasterLivePortrait--checkpoints models/ComfyUI--models models/ComfyUI--models/sam2--checkpoints models/ComfyUI--models/checkpoints
+mkdir -p models/StreamDiffusion--engines models/ComfyUI--models models/ComfyUI--models/sam2--checkpoints models/ComfyUI--models/checkpoints
 
 # Ensure 'huggingface-cli' is installed.
 echo "Checking if 'huggingface-cli' is installed..."
diff --git a/runner/docker/Dockerfile.live-base-liveportrait b/runner/docker/Dockerfile.live-base-liveportrait
deleted file mode 100644
index 05e3e0b0..00000000
--- a/runner/docker/Dockerfile.live-base-liveportrait
+++ /dev/null
@@ -1,67 +0,0 @@
-ARG BASE_IMAGE=livepeer/ai-runner:live-base
-FROM ${BASE_IMAGE}
-
-# Download and install the NVIDIA TensorRT repository local deb
-RUN wget --progress=dot:mega https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.6.1/local_repos/nv-tensorrt-local-repo-ubuntu2204-8.6.1-cuda-12.0_1.0-1_amd64.deb && \
-    dpkg -i nv-tensorrt-local-repo-ubuntu2204-8.6.1-cuda-12.0_1.0-1_amd64.deb && \
-    cp /var/nv-tensorrt-local-repo-ubuntu2204-8.6.1-cuda-12.0/*-keyring.gpg /usr/share/keyrings/ && \
-    rm nv-tensorrt-local-repo-ubuntu2204-8.6.1-cuda-12.0_1.0-1_amd64.deb
-
-# Install TensorRT Python library
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends tensorrt && \
-    rm -rf /var/lib/apt/lists/*
-
-# Install cmake and build dependencies for grid-sample3d-trt-plugin
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends cmake build-essential && \
-    rm -rf /var/lib/apt/lists/*
-
-# Build grid-sample3d-trt-plugin for FasterLivePortrait
-RUN git clone https://github.com/SeanWangJS/grid-sample3d-trt-plugin.git /opt/grid-sample3d-trt-plugin && \
-    cd /opt/grid-sample3d-trt-plugin && \
-    sed -i 's/set_target_properties(${PROJECT_NAME} PROPERTIES CUDA_ARCHITECTURES ".*")/set_target_properties(${PROJECT_NAME} PROPERTIES CUDA_ARCHITECTURES "60;70;75;80;86")/' CMakeLists.txt && \
-    mkdir build && cd build && \
-    export PATH=/usr/local/cuda/bin:$PATH && \
-    cmake .. -DTensorRT_ROOT=/usr/include && \
-    make
-
-# Install required Python version
-ARG PYTHON_VERSION=3.10
-RUN pyenv install $PYTHON_VERSION && \
-    pyenv global $PYTHON_VERSION && \
-    pyenv rehash
-
-# Upgrade pip and install required packages
-ARG PIP_VERSION=23.3.2
-ENV PIP_PREFER_BINARY=1
-RUN pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools==69.5.1 wheel==0.43.0
-
-# Install Python TensorRT packages
-RUN pip install --no-cache-dir tensorrt==8.6.1 numpy==1.26.4
-
-# Clone the FasterLivePortrait repository
-RUN mkdir -p /app/app/live && \
-    git clone https://github.com/warmshao/FasterLivePortrait.git /app/app/live/FasterLivePortrait && \
-    cd /app/app/live/FasterLivePortrait && \
-    git checkout 6aa8104b03499ebe2a881c8fbaf55ff628235f4f
-
-WORKDIR /app/app/live/FasterLivePortrait
-
-COPY images/flame-serious.jpg \
-    images/flame-smile.jpg \
-    images/heart.jpg \
-    images/pirate.jpg \
-    ./assets/examples/source/
-
-# FasterLivePortrait doesn't pin versions so we use a custom requirements.txt
-COPY requirements-liveportrait.txt requirements.txt
-RUN pip install --no-cache-dir -r requirements.txt
-
-# TODO: Setup dependencies for animal models (needs some custom deps detected on runtime which are not on pypi)
-
-WORKDIR /app
-
-# Create symlinks for checkpoints as FasterLivePortrait relies heavily on relative paths
-RUN ln -s /models/FasterLivePortrait--checkpoints /app/app/live/FasterLivePortrait/checkpoints
-RUN ln -s /models/FasterLivePortrait--checkpoints ./checkpoints
diff --git a/runner/requirements-liveportrait.txt b/runner/requirements-liveportrait.txt
deleted file mode 100644
index c1150ee2..00000000
--- a/runner/requirements-liveportrait.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-ffmpeg-python==0.2.0
-gradio==5.6.0
-insightface==0.7.3
-mediapipe==0.10.15
-numpy==1.26.4
-omegaconf==2.3.0
-onnx==1.17.0
-opencv-python==4.10.0.84
-pycuda==2024.1.2
-scikit-image==0.24.0
-torchgeometry==0.1.2
-torchvision==0.19.1
-nvidia-ml-py==12.560.30
diff --git a/worker/docker.go b/worker/docker.go
index 1b30f350..9e64a4af 100644
--- a/worker/docker.go
+++ b/worker/docker.go
@@ -66,7 +66,6 @@ var pipelineToImage = map[string]string{
 
 var livePipelineToImage = map[string]string{
 	"streamdiffusion":    "livepeer/ai-runner:live-app-streamdiffusion",
-	"liveportrait":       "livepeer/ai-runner:live-app-liveportrait",
 	"comfyui":            "livepeer/ai-runner:live-app-comfyui",
 	"segment_anything_2": "livepeer/ai-runner:live-app-segment_anything_2",
 	"noop":               "livepeer/ai-runner:live-app-noop",