From 8dc93ad3e4d91456dc23af6c1b4b17ec0d98b26f Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 14 Sep 2023 11:42:10 +0200
Subject: [PATCH] [Import] Don't force transformers to be installed (#5035)

* [Import] Don't force transformers to be installed

* make style
---
 src/diffusers/loaders.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 2bec9a9ab349..5fbf6b819f55 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -42,7 +42,7 @@
 
 
 if is_transformers_available():
-    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer
+    from transformers import CLIPTextModel, CLIPTextModelWithProjection
 
 if is_accelerate_available():
     from accelerate import init_empty_weights
@@ -628,7 +628,7 @@ class TextualInversionLoaderMixin:
     Load textual inversion tokens and embeddings to the tokenizer and text encoder.
     """
 
-    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
+    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
         be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
@@ -655,7 +655,7 @@ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTra
 
         return prompts
 
-    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
+    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
@@ -689,8 +689,8 @@ def load_textual_inversion(
         self,
         pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
         token: Optional[Union[str, List[str]]] = None,
-        tokenizer: Optional[PreTrainedTokenizer] = None,
-        text_encoder: Optional[PreTrainedModel] = None,
+        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         **kwargs,
     ):
         r"""
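
Note: the fix relies on two standard Python/flake8 mechanisms rather than anything diffusers-specific. A quoted annotation such as `tokenizer: "PreTrainedTokenizer"` is never evaluated at import time, so the module imports cleanly even when transformers is not installed, and `# noqa: F821` suppresses flake8's undefined-name warning for the quoted symbol. Below is a minimal, self-contained sketch of the same pattern; the module layout and the `_HAS_TRANSFORMERS` / `load_tokenizer` names are illustrative, not the diffusers API.

    # Illustrative only; importlib.util.find_spec is the sole stdlib call here,
    # everything else besides the transformers imports is an assumed name.
    import importlib.util
    from typing import Optional

    # Probe for the optional dependency without importing it.
    _HAS_TRANSFORMERS = importlib.util.find_spec("transformers") is not None

    if _HAS_TRANSFORMERS:
        # Eagerly import only the names this module uses unconditionally.
        from transformers import CLIPTextModel  # noqa: F401


    def load_tokenizer(tokenizer: Optional["PreTrainedTokenizer"] = None):  # noqa: F821
        # The quoted annotation is never evaluated, so importing this module
        # works without transformers; fail only when the feature is used.
        if tokenizer is None:
            if not _HAS_TRANSFORMERS:
                raise ImportError("`transformers` is required to load a default tokenizer.")
            from transformers import AutoTokenizer  # deferred import on first use
            tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
        return tokenizer

The same effect could be achieved with a `typing.TYPE_CHECKING` guard, but string annotations plus `# noqa: F821` match what this patch actually does.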