Add support for huber_scale
bmaltais committed Dec 31, 2024
1 parent 45cfc1d commit fce89ad
Showing 5 changed files with 28 additions and 0 deletions.
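For background, the "huber" and "smooth l1" loss modes in sd-scripts use a pseudo-Huber loss that behaves like L2 for small residuals and L1 for large ones, with huber_c setting the transition point; huber_scale adds a separate scale knob alongside it. The sketch below is a minimal illustration under that reading; the exact placement of huber_scale inside sd-scripts may differ, and the function name here is hypothetical.

import torch

def scaled_pseudo_huber(pred, target, huber_c=0.1, huber_scale=1.0):
    # Illustrative scaled pseudo-Huber loss; NOT the exact sd-scripts code.
    # Quadratic for small residuals, linear for large ones. huber_c sets the
    # transition point; huber_scale is assumed here to multiply the result.
    residual_sq = (pred - target) ** 2
    loss = 2 * huber_c * (torch.sqrt(residual_sq + huber_c**2) - huber_c)
    return loss * huber_scale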
8 changes: 8 additions & 0 deletions kohya_gui/class_advanced_training.py
@@ -168,6 +168,14 @@ def list_vae_files(path):
step=0.01,
info="The huber loss parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type",
)
self.huber_scale = gr.Number(
label="Huber scale",
value=self.config.get("advanced.huber_scale", 1.0),
minimum=0.0,
maximum=1.0,
step=0.01,
info="The Huber loss scale parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.",
)

with gr.Row():
self.save_every_n_steps = gr.Number(
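The new gr.Number reads its default from the saved GUI config via the dotted key "advanced.huber_scale", falling back to 1.0. A hypothetical stand-in for that lookup (the real config class in kohya_gui is more involved) could look like:

class MiniConfig:
    # Hypothetical stand-in for the GUI config object used above.
    def __init__(self, data):
        self.data = data

    def get(self, key, default=None):
        # Dotted keys such as "advanced.huber_scale" walk nested dicts.
        node = self.data
        for part in key.split("."):
            if not isinstance(node, dict) or part not in node:
                return default
            node = node[part]
        return node

config = MiniConfig({"advanced": {"huber_scale": 0.5}})
assert config.get("advanced.huber_scale", 1.0) == 0.5
assert config.get("advanced.missing", 1.0) == 1.0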
5 changes: 5 additions & 0 deletions kohya_gui/dreambooth_gui.py
@@ -159,6 +159,7 @@ def save_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -367,6 +368,7 @@ def open_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -570,6 +572,7 @@ def train_model(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -907,6 +910,7 @@ def train_model(
"gradient_accumulation_steps": int(gradient_accumulation_steps),
"gradient_checkpointing": gradient_checkpointing,
"huber_c": huber_c,
"huber_scale": huber_scale,
"huber_schedule": huber_schedule,
"huggingface_path_in_repo": huggingface_path_in_repo,
"huggingface_repo_id": huggingface_repo_id,
@@ -1341,6 +1345,7 @@ def dreambooth_tab(
advanced_training.loss_type,
advanced_training.huber_schedule,
advanced_training.huber_c,
advanced_training.huber_scale,
advanced_training.vae_batch_size,
advanced_training.min_snr_gamma,
advanced_training.weighted_captions,
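dreambooth_gui.py shows the wiring pattern that repeats in finetune_gui.py, lora_gui.py, and textual_inversion_gui.py below: huber_scale joins the save_configuration, open_configuration, and train_model signatures and is written into the run-config dict alongside huber_c and huber_schedule. As a rough sketch of how such a dict typically becomes trainer arguments (kohya_ss actually serializes it to a TOML file for sd-scripts; the helper name here is illustrative):

def dict_to_cli_args(config):
    # Illustrative: turn a flat config dict into CLI-style flags.
    args = []
    for key, value in config.items():
        if value is None or value is False:
            continue  # omitted flags fall back to trainer defaults
        if value is True:
            args.append(f"--{key}")  # boolean flags carry no value
        else:
            args.extend([f"--{key}", str(value)])
    return args

print(dict_to_cli_args({"loss_type": "huber", "huber_c": 0.1, "huber_scale": 1.0}))
# ['--loss_type', 'huber', '--huber_c', '0.1', '--huber_scale', '1.0']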
5 changes: 5 additions & 0 deletions kohya_gui/finetune_gui.py
@@ -167,6 +167,7 @@ def save_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -381,6 +382,7 @@ def open_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -601,6 +603,7 @@ def train_model(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
weighted_captions,
@@ -973,6 +976,7 @@ def train_model(
"gradient_accumulation_steps": int(gradient_accumulation_steps),
"gradient_checkpointing": gradient_checkpointing,
"huber_c": huber_c,
"huber_scale": huber_scale,
"huber_schedule": huber_schedule,
"huggingface_repo_id": huggingface_repo_id,
"huggingface_token": huggingface_token,
@@ -1473,6 +1477,7 @@ def list_presets(path):
advanced_training.loss_type,
advanced_training.huber_schedule,
advanced_training.huber_c,
advanced_training.huber_scale,
advanced_training.vae_batch_size,
advanced_training.min_snr_gamma,
weighted_captions,
5 changes: 5 additions & 0 deletions kohya_gui/lora_gui.py
@@ -180,6 +180,7 @@ def save_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -449,6 +450,7 @@ def open_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -752,6 +754,7 @@ def train_model(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -1409,6 +1412,7 @@ def train_model(
"gradient_checkpointing": gradient_checkpointing,
"highvram": highvram,
"huber_c": huber_c,
"huber_scale": huber_scale,
"huber_schedule": huber_schedule,
"huggingface_repo_id": huggingface_repo_id,
"huggingface_token": huggingface_token,
@@ -2659,6 +2663,7 @@ def update_LoRA_settings(
advanced_training.loss_type,
advanced_training.huber_schedule,
advanced_training.huber_c,
advanced_training.huber_scale,
advanced_training.vae_batch_size,
advanced_training.min_snr_gamma,
advanced_training.save_every_n_steps,
5 changes: 5 additions & 0 deletions kohya_gui/textual_inversion_gui.py
@@ -153,6 +153,7 @@ def save_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -319,6 +320,7 @@ def open_configuration(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -478,6 +480,7 @@ def train_model(
loss_type,
huber_schedule,
huber_c,
huber_scale,
vae_batch_size,
min_snr_gamma,
save_every_n_steps,
@@ -771,6 +774,7 @@ def train_model(
"gradient_accumulation_steps": int(gradient_accumulation_steps),
"gradient_checkpointing": gradient_checkpointing,
"huber_c": huber_c,
"huber_scale": huber_scale,
"huber_schedule": huber_schedule,
"huggingface_repo_id": huggingface_repo_id,
"huggingface_token": huggingface_token,
@@ -1225,6 +1229,7 @@ def list_embedding_files(path):
advanced_training.loss_type,
advanced_training.huber_schedule,
advanced_training.huber_c,
advanced_training.huber_scale,
advanced_training.vae_batch_size,
advanced_training.min_snr_gamma,
advanced_training.save_every_n_steps,
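Taken together, the five files surface one new knob end-to-end. Assuming the third-party toml package (which kohya_ss uses for its config files), the relevant slice of an emitted run config would look roughly like this; the real file carries many more keys:

import toml  # third-party 'toml' package

run_config = {
    "loss_type": "huber",
    "huber_schedule": "snr",  # assumed default schedule for huber_c
    "huber_c": 0.1,
    "huber_scale": 1.0,
}
print(toml.dumps(run_config), end="")
# loss_type = "huber"
# huber_schedule = "snr"
# huber_c = 0.1
# huber_scale = 1.0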
