From 59a2388585a8ad6110110972d917fcd1bc56af52 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 23 Oct 2024 18:09:35 +0000 Subject: [PATCH 1/5] Bump diffusers, accelerate, and huggingface-hub. --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3065ad4c587..493c68ad7d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,12 +33,12 @@ classifiers = [ ] dependencies = [ # Core generation dependencies, pinned for reproducible builds. - "accelerate==0.30.1", + "accelerate==1.0.1", "bitsandbytes==0.43.3; sys_platform!='darwin'", "clip_anytorch==2.6.0", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel==2.0.2", "controlnet-aux==0.0.7", - "diffusers[torch]==0.27.2", + "diffusers[torch]==0.31.0", "gguf==0.10.0", "invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids "mediapipe>=0.10.7", # needed for "mediapipeface" controlnet model @@ -61,7 +61,7 @@ dependencies = [ # Core application dependencies, pinned for reproducible builds. "fastapi-events==0.11.1", "fastapi==0.111.0", - "huggingface-hub==0.23.1", + "huggingface-hub==0.26.1", "pydantic-settings==2.2.1", "pydantic==2.7.2", "python-socketio==5.11.1", From 85c0e0db1e174d87235cf7a3bf55ea60acff70e8 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 23 Oct 2024 18:25:12 +0000 Subject: [PATCH 2/5] Fix changed import for FromOriginalControlNetMixin after diffusers bump. 
--- invokeai/backend/util/hotfixes.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py index 7e362fe9589..95f2c904ad8 100644 --- a/invokeai/backend/util/hotfixes.py +++ b/invokeai/backend/util/hotfixes.py @@ -3,7 +3,7 @@ import diffusers import torch from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.loaders import FromOriginalControlNetMixin +from diffusers.loaders.single_file_model import FromOriginalModelMixin from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module from diffusers.models.embeddings import ( @@ -32,7 +32,9 @@ logger = InvokeAILogger.get_logger(__name__) -class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin): +# NOTE(ryand): I'm not the original author of this code, but for future reference, it appears that this class was copied +# from diffusers in order to add support for the encoder_attention_mask argument. +class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): """ A ControlNet model. From 1ca57ade4d1e6799a8a02b9c9bd33fc8591605bd Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Wed, 23 Oct 2024 18:29:24 +0000 Subject: [PATCH 3/5] Fix huggingface_hub.errors imports after version bump. 
--- invokeai/backend/model_manager/metadata/fetch/huggingface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_manager/metadata/fetch/huggingface.py b/invokeai/backend/model_manager/metadata/fetch/huggingface.py index 8787ceeb36a..8c1d6e74aea 100644 --- a/invokeai/backend/model_manager/metadata/fetch/huggingface.py +++ b/invokeai/backend/model_manager/metadata/fetch/huggingface.py @@ -20,7 +20,7 @@ import requests from huggingface_hub import HfApi, configure_http_backend, hf_hub_url -from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError +from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError from pydantic.networks import AnyHttpUrl from requests.sessions import Session From e88752983e752d59a06650927115e3d2d6f2e959 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 24 Oct 2024 19:03:17 +0000 Subject: [PATCH 4/5] Remove `load_safety_checker=False` from calls to from_single_file(...). This param has been deprecated, and by including it (even when set to False) the safety checker automatically gets downloaded. 
--- .../backend/model_manager/load/model_loaders/stable_diffusion.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py index 572859dbaee..d82d4480cc8 100644 --- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -134,7 +134,6 @@ def _load_from_singlefile( torch_dtype=self._torch_dtype, prediction_type=prediction_type, upcast_attention=upcast_attention, - load_safety_checker=False, ) if not submodel_type: From ae327c763bb06763061dd3302e2e94a27e605565 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 24 Oct 2024 19:18:20 +0000 Subject: [PATCH 5/5] Remove unused prediction_type and upcast_attention from from_single_file(...) calls. --- .../model_manager/load/model_loaders/stable_diffusion.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py index d82d4480cc8..4113588b36f 100644 --- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -117,8 +117,6 @@ def _load_from_singlefile( load_class = load_classes[config.base][config.variant] except KeyError as e: raise Exception(f"No diffusers pipeline known for base={config.base}, variant={config.variant}") from e - prediction_type = config.prediction_type.value - upcast_attention = config.upcast_attention # Without SilenceWarnings we get log messages like this: # site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
@@ -129,12 +127,7 @@ def _load_from_singlefile( # ['text_model.embeddings.position_ids'] with SilenceWarnings(): - pipeline = load_class.from_single_file( - config.path, - torch_dtype=self._torch_dtype, - prediction_type=prediction_type, - upcast_attention=upcast_attention, - ) + pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype) if not submodel_type: return pipeline