Fix bug in SDXL LoRA training.
RyanJDick committed on Aug 13, 2023
Commit 859c082 (parent 98fe471)
Showing 1 changed file with 1 addition and 1 deletion.
@@ -425,7 +425,7 @@ def run_training(config: FinetuneLoRASDXLConfig):  # noqa: C901
     lora_layers["unet"] = inject_lora_into_unet(unet, config.train_unet_non_attention_blocks)
     if config.train_text_encoder:
         lora_layers["text_encoder_1"] = inject_lora_into_clip_text_encoder(text_encoder_1, "lora_te1")
-        lora_layers["text_encoder_2"] = inject_lora_into_clip_text_encoder(text_encoder_1, "lora_te2")
+        lora_layers["text_encoder_2"] = inject_lora_into_clip_text_encoder(text_encoder_2, "lora_te2")
 
     if config.xformers:
         import xformers  # noqa: F401
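The removed line passed text_encoder_1 to both injection calls, so SDXL's first CLIP text encoder received LoRA layers twice (under both the "lora_te1" and "lora_te2" prefixes) while the second text encoder received none and was never trained. Below is a minimal sketch of the bug pattern and the fix; the stub helper and placeholder objects are illustrative assumptions, not the repository's actual implementation:

# A minimal sketch of the copy-paste bug this commit fixes (stub names and
# placeholder objects; not the repository's actual code).

def inject_lora_into_clip_text_encoder(text_encoder, prefix):
    """Stub standing in for the real injection helper: records the encoder
    it would attach LoRA layers to, under the given key prefix."""
    return {"prefix": prefix, "target": text_encoder}

text_encoder_1 = object()  # placeholder for SDXL's first CLIP text encoder
text_encoder_2 = object()  # placeholder for SDXL's second CLIP text encoder

lora_layers = {}
# Before the fix, both calls received text_encoder_1, so the first encoder
# was injected twice and text_encoder_2 was never trained:
#   lora_layers["text_encoder_2"] = inject_lora_into_clip_text_encoder(text_encoder_1, "lora_te2")

# After the fix, each encoder gets its own injection:
lora_layers["text_encoder_1"] = inject_lora_into_clip_text_encoder(text_encoder_1, "lora_te1")
lora_layers["text_encoder_2"] = inject_lora_into_clip_text_encoder(text_encoder_2, "lora_te2")

# A cheap guard against this class of bug:
assert lora_layers["text_encoder_1"]["target"] is not lora_layers["text_encoder_2"]["target"]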
