[Import] Don't force transformers to be installed (#5035)
* [Import] Don't force transformers to be installed

* make style
patrickvonplaten authored Sep 14, 2023
1 parent e2033d2 commit 8dc93ad
Showing 1 changed file with 5 additions and 5 deletions.
src/diffusers/loaders.py

```diff
@@ -42,7 +42,7 @@


 if is_transformers_available():
-    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer
+    from transformers import CLIPTextModel, CLIPTextModelWithProjection

 if is_accelerate_available():
     from accelerate import init_empty_weights
@@ -628,7 +628,7 @@ class TextualInversionLoaderMixin:
     Load textual inversion tokens and embeddings to the tokenizer and text encoder.
     """

-    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
+    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
         be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
@@ -655,7 +655,7 @@ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):

         return prompts

-    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
+    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
@@ -689,8 +689,8 @@ def load_textual_inversion(
         self,
         pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
         token: Optional[Union[str, List[str]]] = None,
-        tokenizer: Optional[PreTrainedTokenizer] = None,
-        text_encoder: Optional[PreTrainedModel] = None,
+        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         **kwargs,
     ):
         r"""
```
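The change is the standard optional-dependency idiom: concrete transformers classes are imported only when the package is present, while type annotations refer to them as quoted forward references so `loaders.py` still imports when transformers is absent. The added `# noqa: F821` comments stop flake8 from flagging those quoted names as undefined now that they are no longer imported at module scope. A minimal, self-contained sketch of the idiom, assuming nothing beyond the standard library (the `is_transformers_available` helper mirrors diffusers' own utility; `encode` is a hypothetical function added purely for illustration):

```python
import importlib.util
from typing import Optional


def is_transformers_available() -> bool:
    # Detect the package without importing it, so the availability
    # check itself never pulls in the heavy dependency.
    return importlib.util.find_spec("transformers") is not None


if is_transformers_available():
    # The concrete classes exist only when transformers is installed.
    from transformers import PreTrainedModel, PreTrainedTokenizer


def encode(
    text: str,
    tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
    text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
):
    # The quoted annotations are plain strings and are never evaluated
    # at import time, so this module loads even when the guarded import
    # above was skipped. The noqa comments suppress flake8's F821
    # (undefined name) for the quoted names -- exactly what this commit adds.
    if tokenizer is None or text_encoder is None:
        raise ValueError("Install `transformers` and pass a tokenizer and text encoder.")
    inputs = tokenizer(text, return_tensors="pt")
    return text_encoder(**inputs)
```

With this pattern, transformers is required only at the call sites that actually use these objects, not merely to import the module.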
