diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1d539b1..451a665 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
 ## What's Changed
+* Update colab notebook by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/128
 * Add a Docker container for dot by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/95
 * Fix of cusolver error on GPU by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/110
 * Update the GUI, PyTorch and the documentation by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/107
diff --git a/notebooks/colab_demo.ipynb b/notebooks/colab_demo.ipynb
index beab48c..78eb911 100644
--- a/notebooks/colab_demo.ipynb
+++ b/notebooks/colab_demo.ipynb
@@ -1,212 +1,261 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Deepfake Offensive Toolkit\n",
-    "\n",
-    "> **Disclaimer**: This notebook is primarily used for demo purposes on Google Colab.\n",
-    "\n",
-    "**Note**: We recommend running this notebook on Google Colab with GPU enabled.\n",
-    "\n",
-    "To enable GPU, do the following: \n",
-    "\n",
-    "`Click \"Runtime\" tab > select \"Change runtime type\" option > set \"Hardware accelerator\" to \"GPU\"`\n",
-    "\n",
-    "### Install Notebook Pre-requisites:\n",
-    "\n",
-    "We install the following pre-requisities:\n",
-    "- `ffmpeg`\n",
-    "- `conda` (via [condacolab](https://github.com/conda-incubator/condacolab))\n",
-    "\n",
-    "Note: The notebook session will restart after installing the pre-requisites. \n",
-    "\n",
-    "**RUN THE BELOW CELL ONLY ONCE.**\n",
-    "\n",
-    "**ONCE THE NOTEBOOK SESSION RESTARTS, SKIP THIS CELL MOVE TO \"STEP 1\" SECTION OF THIS NOTEBOOK**"
-   ]
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "view-in-github"
+      },
+      "source": [
+        "\"Open"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "rOTJFaF9WIqg"
+      },
+      "source": [
+        "# Deepfake Offensive Toolkit\n",
+        "\n",
+        "> **Disclaimer**: This notebook is primarily used for demo purposes on Google Colab.\n",
+        "\n",
+        "**Note**: We recommend running this notebook on Google Colab with GPU enabled.\n",
+        "\n",
+        "To enable GPU, do the following:\n",
+        "\n",
+        "`Click \"Runtime\" tab > select \"Change runtime type\" option > set \"Hardware accelerator\" to \"GPU\"`\n",
+        "\n",
+        "### Install Notebook Pre-requisites:\n",
+        "\n",
+        "We install the following pre-requisites:\n",
+        "- `ffmpeg`\n",
+        "- `conda` (via [condacolab](https://github.com/conda-incubator/condacolab))\n",
+        "\n",
+        "Note: The notebook session will restart after installing the pre-requisites.\n",
+        "\n",
+        "**RUN THE BELOW CELL ONLY ONCE.**\n",
+        "\n",
+        "**ONCE THE NOTEBOOK SESSION RESTARTS, SKIP THIS CELL AND MOVE TO \"STEP 1\" SECTION OF THIS NOTEBOOK**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "GnL7GZXGWIqo"
+      },
+      "outputs": [],
+      "source": [
+        "# install linux pre-requisites\n",
+        "!sudo apt install ffmpeg\n",
+        "\n",
+        "# install miniconda3\n",
+        "!pip install -q condacolab\n",
+        "import condacolab\n",
+        "condacolab.install_miniconda()\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "9oI_egyVWIqq"
+      },
+      "source": [
+        "## Step 1 - Clone Repository"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "LvZL-BD0WIqq"
+      },
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "os.chdir('/content')\n",
+        "CODE_DIR = 'dot'\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "gTnnBM5xWIqr"
+      },
+      "outputs": [],
+      "source": [
+        "!git clone https://github.com/sensity-ai/dot.git $CODE_DIR\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Hgx6JdrrWIqr"
+      },
+      "outputs": [],
+      "source": [
+        "os.chdir(f'./{CODE_DIR}')\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Nb3q4HbSWIqs"
+      },
+      "source": [
+        "## Step 2 - Setup Conda Environment\n",
+        "\n",
+        "**ONCE THE INSTALLATION IS COMPLETE, RESTART THE NOTEBOOK AND MOVE TO \"STEP 3\" SECTION OF THIS NOTEBOOK**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "VkLiUqtbWIqt"
+      },
+      "outputs": [],
+      "source": [
+        "# update base conda environment: install python=3.8 + cudatoolkit=11.8\n",
+        "!conda install python=3.8 cudatoolkit=11.8\n",
+        "\n",
+        "# install pip requirements\n",
+        "!pip install llvmlite==0.38.1 onnxruntime-gpu==1.9.0\n",
+        "!pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118\n",
+        "!pip install -r requirements.txt\n",
+        "\n",
+        "# install dot\n",
+        "!pip install -e .\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "cuCaEkOiWIqy"
+      },
+      "source": [
+        "## Step 3 - Download Pretrained models"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "RVQqmGmsWIqy"
+      },
+      "outputs": [],
+      "source": [
+        "%cd /content/dot\n",
+        "\n",
+        "# download binaries\n",
+        "!gdown 1Qaf9hE62XSvgmxR43dfiwEPWWS_dXSCE\n",
+        "\n",
+        "# unzip binaries\n",
+        "!unzip dot_model_checkpoints.zip\n",
+        "\n",
+        "# clean-up\n",
+        "!rm -rf *.z*\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "IEYtimAjWIqz"
+      },
+      "source": [
+        "## Step 4: Run dot on image and video files instead of camera feed\n",
+        "\n",
+        "### Using SimSwap on Images\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "cA0H6ynvWIq0"
+      },
+      "outputs": [],
+      "source": [
+        "!dot \\\n",
+        "-c ./configs/simswap.yaml \\\n",
+        "--target \"data/\" \\\n",
+        "--source \"data/\" \\\n",
+        "--save_folder \"image_simswap_output/\" \\\n",
+        "--use_image \\\n",
+        "--use_gpu\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "MKbRDeSAWIq0"
+      },
+      "source": [
+        "### Using SimSwap on Videos"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "rJqqmy2vD8uf"
+      },
+      "outputs": [],
+      "source": [
+        "!dot \\\n",
+        "-c ./configs/simswap.yaml \\\n",
+        "--source \"data/\" \\\n",
+        "--target \"data/\" \\\n",
+        "--save_folder \"video_simswap_output/\" \\\n",
+        "--limit 1 \\\n",
+        "--use_video \\\n",
+        "--use_gpu"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "oBJOJ2NWWIq1"
+      },
+      "outputs": [],
+      "source": [
+        "!python scripts/video_swap.py \\\n",
+        "-s \"data/\" \\\n",
+        "-t \"data/\" \\\n",
+        "-o \"video_simswap_output/\" \\\n",
+        "-d 5 \\\n",
+        "-l 1\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "include_colab_link": true,
+      "provenance": []
+    },
+    "gpuClass": "standard",
+    "kernelspec": {
+      "display_name": "Python 3 (ipykernel)",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3"
+    }
   },
-  {
"cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# install linux pre-requisites\n", - "!sudo apt install ffmpeg\n", - "\n", - "# install miniconda3\n", - "!pip install -q condacolab\n", - "import condacolab\n", - "condacolab.install_miniconda()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 1 - Clone Repository" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.chdir('/content')\n", - "CODE_DIR = 'dot'\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!git clone https://github.com/sensity-ai/dot.git $CODE_DIR\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "os.chdir(f'./{CODE_DIR}')\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 2 - Setup Conda Environment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# update base conda environment: install python=3.8 + cudatoolkit=11.3\n", - "!conda install python=3.8 cudatoolkit=11.3\n", - "\n", - "# install pip requirements\n", - "!pip install llvmlite==0.36.0 onnxruntime-gpu==1.9.0\n", - "!pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113\n", - "!pip install -r requirements.txt\n", - "\n", - "# install dot\n", - "!pip install -e .\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 2 - Download Pretrained models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# download binaries\n", - "! wget https://github.com/sensity-ai/dot/releases/download/1.0.0/dot_model_checkpoints.z01 \\\n", - "&& wget https://github.com/sensity-ai/dot/releases/download/1.0.0/dot_model_checkpoints.z02 \\\n", - "&& wget https://github.com/sensity-ai/dot/releases/download/1.0.0/dot_model_checkpoints.zip\n", - "\n", - "# unzip binaries\n", - "! 
-    "! zip -s 0 dot_model_checkpoints.zip --out saved_models.zip \\\n",
-    "&& unzip saved_models.zip\n",
-    "\n",
-    "# clean-up\n",
-    "!rm -rf *.z*\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3: Run dot on image and video files instead of camera feed\n",
-    "\n",
-    "### Using SimSwap on Images\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!dot \\\n",
-    "-c ./configs/simswap.yaml \\\n",
-    "--target \"data/\" \\\n",
-    "--source \"data/\" \\\n",
-    "--save_folder \"image_simswap_output/\" \\\n",
-    "--use_image \\\n",
-    "--use_gpu\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Using SimSwap on Videos"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!python scripts/video_swap.py \\\n",
-    "-s \"data/\" \\\n",
-    "-t \"data/\" \\\n",
-    "-o \"video_simswap_output/\" \\\n",
-    "-d 5 \\\n",
-    "-l 5\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "accelerator": "GPU",
-  "colab": {
-   "collapsed_sections": [],
-   "name": "colab_demo.ipynb",
-   "provenance": []
-  },
-  "gpuClass": "standard",
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
+  "nbformat": 4,
+  "nbformat_minor": 0
 }
diff --git a/src/dot/commons/video/video_utils.py b/src/dot/commons/video/video_utils.py
index 9916872..3afc1b5 100644
--- a/src/dot/commons/video/video_utils.py
+++ b/src/dot/commons/video/video_utils.py
@@ -101,7 +101,7 @@ def video_pipeline(
         crop_size (int, optional): Face crop size. Defaults to 224.
         limit (int, optional): Limit number of video-swaps. Defaults to None.
     """
-    head_pose = kwargs["head_pose"]
+    head_pose = kwargs.get("head_pose", False)
     source_imgs = find_files_from_path(source, ["jpg", "png", "jpeg"], filter=None)
     target_videos = find_files_from_path(target, ["avi", "mp4", "mov", "MOV"])
     if not source_imgs or not target_videos:
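
The video_utils.py change above replaces a hard kwargs["head_pose"] lookup with kwargs.get("head_pose", False), so video_pipeline no longer raises a KeyError when a caller does not pass that flag. The snippet below is a minimal standalone sketch of the same pattern; run_pipeline and its arguments are hypothetical placeholders for illustration, not part of dot's API.

def run_pipeline(source: str, target: str, **kwargs) -> None:
    # Hard lookup: kwargs["head_pose"] raises KeyError if the caller
    # never passed the flag.
    #
    # Defensive lookup: fall back to False when the flag is absent,
    # mirroring the fix applied to video_pipeline() in this diff.
    head_pose = kwargs.get("head_pose", False)
    print(f"head_pose enabled: {head_pose}")


# Both calls work; only the second enables the optional behaviour.
run_pipeline("source.jpg", "target.mp4")
run_pipeline("source.jpg", "target.mp4", head_pose=True)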