From 5c3d6a60828e7791bfe9e04c6ce2e4837970dd1a Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 11:01:41 -0400 Subject: [PATCH 01/10] fix to make zero set to null --- .../_experimental/sd_dpo_lora/config.py | 5 ++-- .../pipelines/flux/lora/config.py | 6 ++-- .../pipelines/stable_diffusion/lora/config.py | 5 ++-- .../textual_inversion/config.py | 2 +- .../stable_diffusion_xl/finetune/config.py | 2 +- .../stable_diffusion_xl/lora/config.py | 5 ++-- .../lora_and_textual_inversion/config.py | 8 ++--- .../config_groups/flux_lora_config_group.py | 13 ++++---- .../ui/config_groups/sd_lora_config_group.py | 15 ++++++---- .../sd_textual_inversion_config_group.py | 5 ++-- .../sdxl_finetune_config_group.py | 5 ++-- ...lora_and_textual_inversion_config_group.py | 30 ++++++++++++++----- .../config_groups/sdxl_lora_config_group.py | 15 ++++++---- .../sdxl_textual_inversion_config_group.py | 5 ++-- 14 files changed, 75 insertions(+), 46 deletions(-) diff --git a/src/invoke_training/pipelines/_experimental/sd_dpo_lora/config.py b/src/invoke_training/pipelines/_experimental/sd_dpo_lora/config.py index f6ad39a4..988a8b8a 100644 --- a/src/invoke_training/pipelines/_experimental/sd_dpo_lora/config.py +++ b/src/invoke_training/pipelines/_experimental/sd_dpo_lora/config.py @@ -95,11 +95,12 @@ class SdDirectPreferenceOptimizationLoraConfig(BasePipelineConfig): text_encoder_learning_rate: float | None = None """The learning rate to use for the text encoder model. If set, this overrides the optimizer's default learning - rate. + rate. Set to null or 0 to use the optimizer's default learning rate. """ unet_learning_rate: float | None = None """The learning rate to use for the UNet model. If set, this overrides the optimizer's default learning rate. + Set to null or 0 to use the optimizer's default learning rate. """ lr_scheduler: Literal[ @@ -194,7 +195,7 @@ class SdDirectPreferenceOptimizationLoraConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. Set to null or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/flux/lora/config.py b/src/invoke_training/pipelines/flux/lora/config.py index e631fb71..eaa67405 100644 --- a/src/invoke_training/pipelines/flux/lora/config.py +++ b/src/invoke_training/pipelines/flux/lora/config.py @@ -52,12 +52,12 @@ class FluxLoraConfig(BasePipelineConfig): text_encoder_learning_rate: float | None = 1e-4 """The learning rate to use for the text encoder model. If set, this overrides the optimizer's default learning - rate. + rate. Set to null or 0 to use the optimizer's default learning rate. """ transformer_learning_rate: float | None = 1e-4 """The learning rate to use for the transformer model. If set, this overrides the optimizer's default learning - rate. + rate. Set to null or 0 to use the optimizer's default learning rate. """ lr_scheduler: Literal[ @@ -176,7 +176,7 @@ class FluxLoraConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. Set to null or 0 for no clipping. 
""" validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/stable_diffusion/lora/config.py b/src/invoke_training/pipelines/stable_diffusion/lora/config.py index 2a472506..0e9c08ce 100644 --- a/src/invoke_training/pipelines/stable_diffusion/lora/config.py +++ b/src/invoke_training/pipelines/stable_diffusion/lora/config.py @@ -59,11 +59,12 @@ class SdLoraConfig(BasePipelineConfig): text_encoder_learning_rate: float | None = None """The learning rate to use for the text encoder model. If set, this overrides the optimizer's default learning - rate. + rate. Set to null or 0 to use the optimizer's default learning rate. """ unet_learning_rate: float | None = None """The learning rate to use for the UNet model. If set, this overrides the optimizer's default learning rate. + Set to null or 0 to use the optimizer's default learning rate. """ lr_scheduler: Literal[ @@ -188,7 +189,7 @@ class SdLoraConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. Set to null or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/config.py index 4b763b84..69295d5a 100644 --- a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/config.py @@ -160,7 +160,7 @@ class SdTextualInversionConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Maximum gradient norm for gradient clipping. Set to `None` for no clipping. + """Maximum gradient norm for gradient clipping. Set to `null` or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/finetune/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/finetune/config.py index 08dbfce8..6230ca82 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/finetune/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/finetune/config.py @@ -125,7 +125,7 @@ class SdxlFinetuneConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. Set to null or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora/config.py index 125db57a..390e8f90 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora/config.py @@ -59,11 +59,12 @@ class SdxlLoraConfig(BasePipelineConfig): text_encoder_learning_rate: float | None = None """The learning rate to use for the text encoder model. If set, this overrides the optimizer's default learning - rate. + rate. Set to null or 0 to use the optimizer's default learning rate. """ unet_learning_rate: float | None = None """The learning rate to use for the UNet model. If set, this overrides the optimizer's default learning rate. + Set to null or 0 to use the optimizer's default learning rate. """ lr_scheduler: Literal[ @@ -188,7 +189,7 @@ class SdxlLoraConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. 
Set to null or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index d348e493..b72fd5e2 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -85,15 +85,15 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() text_encoder_learning_rate: float = 1e-5 - """The learning rate to use for the text encoder model. + """The learning rate to use for the text encoder model. Set to 0 to use the optimizer's default learning rate. """ unet_learning_rate: float = 1e-4 - """The learning rate to use for the UNet model. + """The learning rate to use for the UNet model. Set to 0 to use the optimizer's default learning rate. """ textual_inversion_learning_rate: float = 1e-3 - """The learning rate to use for textual inversion training of the embeddings. + """The learning rate to use for textual inversion training of the embeddings. Set to 0 to use the optimizer's default learning rate. """ lr_scheduler: Literal[ @@ -187,7 +187,7 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): """ max_grad_norm: float | None = None - """Max gradient norm for clipping. Set to None for no clipping. + """Max gradient norm for clipping. Set to null or 0 for no clipping. """ validation_prompts: list[str] = [] diff --git a/src/invoke_training/ui/config_groups/flux_lora_config_group.py b/src/invoke_training/ui/config_groups/flux_lora_config_group.py index 69b6c93f..2648e446 100644 --- a/src/invoke_training/ui/config_groups/flux_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/flux_lora_config_group.py @@ -67,7 +67,7 @@ def __init__(self): with gr.Row(): self.transformer_learning_rate = gr.Number( label="Transformer Learning Rate", - info="The transformer learning rate. If None, then it is inherited from the base optimizer " + info="The transformer learning rate. Set to 0 or leave empty to inherit from the base optimizer " "learning rate.", interactive=True, ) @@ -103,7 +103,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -314,12 +314,14 @@ def safe_pop(component, default=None): new_config.model = safe_pop(self.model, new_config.model) new_config.train_transformer = safe_pop(self.train_transformer, new_config.train_transformer) new_config.train_text_encoder = safe_pop(self.train_text_encoder, new_config.train_text_encoder) - new_config.transformer_learning_rate = safe_pop( + transformer_lr_value = safe_pop( self.transformer_learning_rate, new_config.transformer_learning_rate ) - new_config.text_encoder_learning_rate = safe_pop( + new_config.transformer_learning_rate = None if transformer_lr_value == 0 else transformer_lr_value + text_encoder_lr_value = safe_pop( self.text_encoder_learning_rate, new_config.text_encoder_learning_rate ) + new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value new_config.gradient_accumulation_steps = safe_pop( self.gradient_accumulation_steps, new_config.gradient_accumulation_steps ) @@ -330,7 +332,8 @@ def safe_pop(component, default=None): new_config.lora_rank_dim = safe_pop(self.lora_rank_dim, new_config.lora_rank_dim) new_config.min_snr_gamma = safe_pop(self.min_snr_gamma, new_config.min_snr_gamma) - new_config.max_grad_norm = safe_pop(self.max_grad_norm, new_config.max_grad_norm) + max_grad_norm_value = safe_pop(self.max_grad_norm, new_config.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = safe_pop(self.train_batch_size, new_config.train_batch_size) new_config.weight_dtype = safe_pop(self.weight_dtype, new_config.weight_dtype) new_config.mixed_precision = safe_pop(self.mixed_precision, new_config.mixed_precision) diff --git a/src/invoke_training/ui/config_groups/sd_lora_config_group.py b/src/invoke_training/ui/config_groups/sd_lora_config_group.py index e77a4048..62364ac5 100644 --- a/src/invoke_training/ui/config_groups/sd_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/sd_lora_config_group.py @@ -109,12 +109,12 @@ def __init__(self): with gr.Row(): self.unet_learning_rate = gr.Number( label="UNet Learning Rate", - info="The UNet learning rate. If None, then it is inherited from the base optimizer learning rate.", + info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", interactive=True, ) self.text_encoder_learning_rate = gr.Number( label="Text Encoder Learning Rate", - info="The text encoder learning rate. If None, then it is inherited from the base optimizer " + info="The text encoder learning rate. Set to 0 or leave empty to inherit from the base optimizer " "learning rate.", interactive=True, ) @@ -153,7 +153,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -223,13 +223,16 @@ def update_config_with_ui_component_data( new_config.hf_variant = ui_data.pop(self.hf_variant) or None new_config.max_checkpoints = ui_data.pop(self.max_checkpoints) new_config.train_unet = ui_data.pop(self.train_unet) - new_config.unet_learning_rate = ui_data.pop(self.unet_learning_rate) + unet_lr_value = ui_data.pop(self.unet_learning_rate) + new_config.unet_learning_rate = None if unet_lr_value == 0 else unet_lr_value new_config.train_text_encoder = ui_data.pop(self.train_text_encoder) - new_config.text_encoder_learning_rate = ui_data.pop(self.text_encoder_learning_rate) + text_encoder_lr_value = ui_data.pop(self.text_encoder_learning_rate) + new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value new_config.lr_scheduler = ui_data.pop(self.lr_scheduler) new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_text_encoder_outputs = ui_data.pop(self.cache_text_encoder_outputs) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) diff --git a/src/invoke_training/ui/config_groups/sd_textual_inversion_config_group.py b/src/invoke_training/ui/config_groups/sd_textual_inversion_config_group.py index 55fa9dd8..484159d6 100644 --- a/src/invoke_training/ui/config_groups/sd_textual_inversion_config_group.py +++ b/src/invoke_training/ui/config_groups/sd_textual_inversion_config_group.py @@ -154,7 +154,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -230,7 +230,8 @@ def update_config_with_ui_component_data( new_config.lr_scheduler = ui_data.pop(self.lr_scheduler) new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) new_config.enable_cpu_offload_during_validation = ui_data.pop(self.enable_cpu_offload_during_validation) diff --git a/src/invoke_training/ui/config_groups/sdxl_finetune_config_group.py b/src/invoke_training/ui/config_groups/sdxl_finetune_config_group.py index 363c318e..572151c2 100644 --- a/src/invoke_training/ui/config_groups/sdxl_finetune_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_finetune_config_group.py @@ -151,7 +151,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -227,7 +227,8 @@ def update_config_with_ui_component_data( new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) new_config.min_snr_gamma = ui_data.pop(self.min_snr_gamma) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_text_encoder_outputs = ui_data.pop(self.cache_text_encoder_outputs) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) diff --git a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py index 657c8ebb..2edaea7c 100644 --- a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py @@ -145,10 +145,20 @@ def __init__(self): self.train_text_encoder = gr.Checkbox(label="Train Text Encoder", interactive=True) self.train_ti = gr.Checkbox(label="Train Textual Inversion Token", scale=2, interactive=True) with gr.Row(): - self.unet_learning_rate = gr.Number(label="UNet Learning Rate", interactive=True) - self.text_encoder_learning_rate = gr.Number(label="Text Encoder Learning Rate", interactive=True) + self.unet_learning_rate = gr.Number( + label="UNet Learning Rate", + info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + interactive=True, + ) + self.text_encoder_learning_rate = gr.Number( + label="Text Encoder Learning Rate", + info="The text encoder learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + interactive=True, + ) self.textual_inversion_learning_rate = gr.Number( - label="Textual Inversion Learning Rate", interactive=True + label="Textual Inversion Learning Rate", + info="The textual inversion learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + interactive=True, ) self.ti_train_steps_ratio = gr.Number(label="Textual Inversion Train Steps Ratio", interactive=True) with gr.Row(): @@ -186,7 +196,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -273,14 +283,18 @@ def update_config_with_ui_component_data( new_config.train_unet = ui_data.pop(self.train_unet) new_config.train_text_encoder = ui_data.pop(self.train_text_encoder) new_config.train_ti = ui_data.pop(self.train_ti) - new_config.unet_learning_rate = ui_data.pop(self.unet_learning_rate) - new_config.text_encoder_learning_rate = ui_data.pop(self.text_encoder_learning_rate) - new_config.textual_inversion_learning_rate = ui_data.pop(self.textual_inversion_learning_rate) + unet_lr_value = ui_data.pop(self.unet_learning_rate) + new_config.unet_learning_rate = None if unet_lr_value == 0 else unet_lr_value + text_encoder_lr_value = ui_data.pop(self.text_encoder_learning_rate) + new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value + ti_lr_value = ui_data.pop(self.textual_inversion_learning_rate) + new_config.textual_inversion_learning_rate = None if ti_lr_value == 0 else ti_lr_value new_config.ti_train_steps_ratio = ui_data.pop(self.ti_train_steps_ratio) new_config.lr_scheduler = ui_data.pop(self.lr_scheduler) new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_text_encoder_outputs = ui_data.pop(self.cache_text_encoder_outputs) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) diff --git a/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py b/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py index 70ad8bf2..bbee2693 100644 --- a/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py @@ -115,12 +115,12 @@ def __init__(self): with gr.Row(): self.unet_learning_rate = gr.Number( label="UNet Learning Rate", - info="The UNet learning rate. If None, then it is inherited from the base optimizer learning rate.", + info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", interactive=True, ) self.text_encoder_learning_rate = gr.Number( label="Text Encoder Learning Rate", - info="The text encoder learning rate. If None, then it is inherited from the base optimizer " + info="The text encoder learning rate. Set to 0 or leave empty to inherit from the base optimizer " "learning rate.", interactive=True, ) @@ -159,7 +159,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -233,13 +233,16 @@ def update_config_with_ui_component_data( new_config.vae_model = ui_data.pop(self.vae_model) or None new_config.max_checkpoints = ui_data.pop(self.max_checkpoints) new_config.train_unet = ui_data.pop(self.train_unet) - new_config.unet_learning_rate = ui_data.pop(self.unet_learning_rate) + unet_lr_value = ui_data.pop(self.unet_learning_rate) + new_config.unet_learning_rate = None if unet_lr_value == 0 else unet_lr_value new_config.train_text_encoder = ui_data.pop(self.train_text_encoder) - new_config.text_encoder_learning_rate = ui_data.pop(self.text_encoder_learning_rate) + text_encoder_lr_value = ui_data.pop(self.text_encoder_learning_rate) + new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value new_config.lr_scheduler = ui_data.pop(self.lr_scheduler) new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_text_encoder_outputs = ui_data.pop(self.cache_text_encoder_outputs) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) diff --git a/src/invoke_training/ui/config_groups/sdxl_textual_inversion_config_group.py b/src/invoke_training/ui/config_groups/sdxl_textual_inversion_config_group.py index 14610610..dcc477b3 100644 --- a/src/invoke_training/ui/config_groups/sdxl_textual_inversion_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_textual_inversion_config_group.py @@ -160,7 +160,7 @@ def __init__(self): ) self.max_grad_norm = gr.Number( label="Max Gradient Norm", - info="Max gradient norm for clipping. Set to None for no clipping.", + info="Max gradient norm for clipping. 
Set to 0 or leave empty for no clipping (null).", interactive=True, ) self.train_batch_size = gr.Number( @@ -238,7 +238,8 @@ def update_config_with_ui_component_data( new_config.lr_scheduler = ui_data.pop(self.lr_scheduler) new_config.lr_warmup_steps = ui_data.pop(self.lr_warmup_steps) new_config.use_masks = ui_data.pop(self.use_masks) - new_config.max_grad_norm = ui_data.pop(self.max_grad_norm) + max_grad_norm_value = ui_data.pop(self.max_grad_norm) + new_config.max_grad_norm = None if max_grad_norm_value == 0 else max_grad_norm_value new_config.train_batch_size = ui_data.pop(self.train_batch_size) new_config.cache_vae_outputs = ui_data.pop(self.cache_vae_outputs) new_config.enable_cpu_offload_during_validation = ui_data.pop(self.enable_cpu_offload_during_validation) From 1b757eb10b905822ed574384e0f4e9e2be41d419 Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 13:28:50 -0400 Subject: [PATCH 02/10] update last config --- .../lora_and_textual_inversion/config.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index b72fd5e2..dd390841 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -84,16 +84,16 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() - text_encoder_learning_rate: float = 1e-5 - """The learning rate to use for the text encoder model. Set to 0 to use the optimizer's default learning rate. + text_encoder_learning_rate: float | None = 1e-5 + """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate. """ - unet_learning_rate: float = 1e-4 - """The learning rate to use for the UNet model. Set to 0 to use the optimizer's default learning rate. + unet_learning_rate: float | None = 1e-4 + """The learning rate to use for the UNet model. Set to null or 0 to use the optimizer's default learning rate. """ - textual_inversion_learning_rate: float = 1e-3 - """The learning rate to use for textual inversion training of the embeddings. Set to 0 to use the optimizer's default learning rate. + textual_inversion_learning_rate: float | None = 1e-3 + """The learning rate to use for textual inversion training of the embeddings. Set to null or 0 to use the optimizer's default learning rate. 
""" lr_scheduler: Literal[ From aa5adf799a7e57e89fde4f89bb04154933138f0c Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 13:34:24 -0400 Subject: [PATCH 03/10] fix lint --- pyproject.toml | 2 +- .../lora_and_textual_inversion/config.py | 3 ++- .../ui/config_groups/flux_lora_config_group.py | 8 ++------ .../ui/config_groups/sd_lora_config_group.py | 3 ++- .../sdxl_lora_and_textual_inversion_config_group.py | 9 ++++++--- .../ui/config_groups/sdxl_lora_config_group.py | 3 ++- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1de0410b..afbbe709 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,7 @@ dependencies = [ [tool.ruff] src = ["src"] -select = ["E", "F", "W", "C9", "N8", "I"] +lint.select = ["E", "F", "W", "C9", "N8", "I"] target-version = "py39" line-length = 120 diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index dd390841..dcd70c92 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -93,7 +93,8 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): """ textual_inversion_learning_rate: float | None = 1e-3 - """The learning rate to use for textual inversion training of the embeddings. Set to null or 0 to use the optimizer's default learning rate. + """The learning rate to use for textual inversion training of the embeddings. Set to null or 0 to use the + optimizer's default learning rate. """ lr_scheduler: Literal[ diff --git a/src/invoke_training/ui/config_groups/flux_lora_config_group.py b/src/invoke_training/ui/config_groups/flux_lora_config_group.py index 2648e446..10af605c 100644 --- a/src/invoke_training/ui/config_groups/flux_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/flux_lora_config_group.py @@ -314,13 +314,9 @@ def safe_pop(component, default=None): new_config.model = safe_pop(self.model, new_config.model) new_config.train_transformer = safe_pop(self.train_transformer, new_config.train_transformer) new_config.train_text_encoder = safe_pop(self.train_text_encoder, new_config.train_text_encoder) - transformer_lr_value = safe_pop( - self.transformer_learning_rate, new_config.transformer_learning_rate - ) + transformer_lr_value = safe_pop(self.transformer_learning_rate, new_config.transformer_learning_rate) new_config.transformer_learning_rate = None if transformer_lr_value == 0 else transformer_lr_value - text_encoder_lr_value = safe_pop( - self.text_encoder_learning_rate, new_config.text_encoder_learning_rate - ) + text_encoder_lr_value = safe_pop(self.text_encoder_learning_rate, new_config.text_encoder_learning_rate) new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value new_config.gradient_accumulation_steps = safe_pop( self.gradient_accumulation_steps, new_config.gradient_accumulation_steps diff --git a/src/invoke_training/ui/config_groups/sd_lora_config_group.py b/src/invoke_training/ui/config_groups/sd_lora_config_group.py index 62364ac5..e3ad87cd 100644 --- a/src/invoke_training/ui/config_groups/sd_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/sd_lora_config_group.py @@ -109,7 +109,8 @@ def __init__(self): with gr.Row(): self.unet_learning_rate = gr.Number( label="UNet Learning Rate", - info="The UNet learning rate. 
Set to 0 or leave empty to inherit from the base optimizer learning rate.", + info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer " + "learning rate.", interactive=True, ) self.text_encoder_learning_rate = gr.Number( diff --git a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py index 2edaea7c..f9b1dba0 100644 --- a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py @@ -147,17 +147,20 @@ def __init__(self): with gr.Row(): self.unet_learning_rate = gr.Number( label="UNet Learning Rate", - info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer " + "learning rate.", interactive=True, ) self.text_encoder_learning_rate = gr.Number( label="Text Encoder Learning Rate", - info="The text encoder learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + info="The text encoder learning rate. Set to 0 or leave empty to inherit from the base optimizer " + "learning rate.", interactive=True, ) self.textual_inversion_learning_rate = gr.Number( label="Textual Inversion Learning Rate", - info="The textual inversion learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + info="The textual inversion learning rate. Set to 0 or leave empty to inherit from the base optimizer " + "learning rate.", interactive=True, ) self.ti_train_steps_ratio = gr.Number(label="Textual Inversion Train Steps Ratio", interactive=True) diff --git a/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py b/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py index bbee2693..be111193 100644 --- a/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_lora_config_group.py @@ -115,7 +115,8 @@ def __init__(self): with gr.Row(): self.unet_learning_rate = gr.Number( label="UNet Learning Rate", - info="The UNet learning rate. Set to 0 or leave empty to inherit from the base optimizer learning rate.", + info="The UNet learning rate. 
Set to 0 or leave empty to inherit from the base optimizer " + "learning rate.", interactive=True, ) self.text_encoder_learning_rate = gr.Number( From 4de3682a44fc222ce43e84dd9ca1dfd095e5a5fc Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 13:48:28 -0400 Subject: [PATCH 04/10] Fix Flux Config --- .../ui/config_groups/flux_lora_config_group.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/invoke_training/ui/config_groups/flux_lora_config_group.py b/src/invoke_training/ui/config_groups/flux_lora_config_group.py index 10af605c..b9b49819 100644 --- a/src/invoke_training/ui/config_groups/flux_lora_config_group.py +++ b/src/invoke_training/ui/config_groups/flux_lora_config_group.py @@ -313,11 +313,9 @@ def safe_pop(component, default=None): # Set basic properties new_config.model = safe_pop(self.model, new_config.model) new_config.train_transformer = safe_pop(self.train_transformer, new_config.train_transformer) - new_config.train_text_encoder = safe_pop(self.train_text_encoder, new_config.train_text_encoder) + # Note: train_text_encoder and text_encoder_learning_rate are not supported for Flux LoRA transformer_lr_value = safe_pop(self.transformer_learning_rate, new_config.transformer_learning_rate) new_config.transformer_learning_rate = None if transformer_lr_value == 0 else transformer_lr_value - text_encoder_lr_value = safe_pop(self.text_encoder_learning_rate, new_config.text_encoder_learning_rate) - new_config.text_encoder_learning_rate = None if text_encoder_lr_value == 0 else text_encoder_lr_value new_config.gradient_accumulation_steps = safe_pop( self.gradient_accumulation_steps, new_config.gradient_accumulation_steps ) From e378224910d655f07dbd9ba4d4adb8f0ab71976d Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 13:49:59 -0400 Subject: [PATCH 05/10] ruff --- .../stable_diffusion_xl/lora_and_textual_inversion/config.py | 3 +-- .../sdxl_lora_and_textual_inversion_config_group.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index dcd70c92..266dc057 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -85,8 +85,7 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() text_encoder_learning_rate: float | None = 1e-5 - """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate. - """ + """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate.""" unet_learning_rate: float | None = 1e-4 """The learning rate to use for the UNet model. Set to null or 0 to use the optimizer's default learning rate. 
diff --git a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py index f9b1dba0..2bb041c0 100644 --- a/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py +++ b/src/invoke_training/ui/config_groups/sdxl_lora_and_textual_inversion_config_group.py @@ -159,8 +159,8 @@ def __init__(self): ) self.textual_inversion_learning_rate = gr.Number( label="Textual Inversion Learning Rate", - info="The textual inversion learning rate. Set to 0 or leave empty to inherit from the base optimizer " - "learning rate.", + info="The textual inversion learning rate. Set to 0 or leave empty to inherit from the base " + "optimizer learning rate.", interactive=True, ) self.ti_train_steps_ratio = gr.Number(label="Textual Inversion Train Steps Ratio", interactive=True) From 6627e75ef8582d76b79fa8e30c5321603b111f30 Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 13:56:58 -0400 Subject: [PATCH 06/10] fix ruff, again. --- .../stable_diffusion_xl/lora_and_textual_inversion/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index 266dc057..dcd70c92 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -85,7 +85,8 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() text_encoder_learning_rate: float | None = 1e-5 - """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate.""" + """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate. + """ unet_learning_rate: float | None = 1e-4 """The learning rate to use for the UNet model. Set to null or 0 to use the optimizer's default learning rate. From f534cc89cd850422e7531e885d3aaba5697e9f71 Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 14:13:39 -0400 Subject: [PATCH 07/10] now ruff? --- .../stable_diffusion/textual_inversion/train.py | 12 +++++------- .../pipelines/stable_diffusion_xl/lora/train.py | 6 +++--- .../stable_diffusion_xl/textual_inversion/train.py | 12 +++++------- src/invoke_training/ui/gradio_blocks/pipeline_tab.py | 12 +++++------- 4 files changed, 18 insertions(+), 24 deletions(-) diff --git a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py index 9400d281..cbc529f2 100644 --- a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py +++ b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py @@ -90,13 +90,11 @@ def _initialize_placeholder_tokens( - Initialize the new token embeddings from either an existing token, or an initial TI embedding file. 
""" if ( - sum( - [ - config.initializer_token is not None, - config.initial_embedding_file is not None, - config.initial_phrase is not None, - ] - ) + sum([ + config.initializer_token is not None, + config.initial_embedding_file is not None, + config.initial_phrase is not None, + ]) != 1 ): raise ValueError( diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py index 489e7a9e..543401a1 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py @@ -254,9 +254,9 @@ def compute_time_ids(original_size, crops_coords_top_left): add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids - add_time_ids = torch.cat( - [compute_time_ids(s, c) for s, c in zip(data_batch["original_size_hw"], data_batch["crop_top_left_yx"])] - ) + add_time_ids = torch.cat([ + compute_time_ids(s, c) for s, c in zip(data_batch["original_size_hw"], data_batch["crop_top_left_yx"]) + ]) unet_conditions = {"time_ids": add_time_ids} # Get the text embedding for conditioning. diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py b/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py index 3ddb0515..1c88eef7 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py @@ -106,13 +106,11 @@ def _initialize_placeholder_tokens( """ if ( - sum( - [ - getattr(config, "initializer_token", None) is not None, - getattr(config, "initial_embedding_file", None) is not None, - getattr(config, "initial_phrase", None) is not None, - ] - ) + sum([ + getattr(config, "initializer_token", None) is not None, + getattr(config, "initial_embedding_file", None) is not None, + getattr(config, "initial_phrase", None) is not None, + ]) != 1 ): raise ValueError( diff --git a/src/invoke_training/ui/gradio_blocks/pipeline_tab.py b/src/invoke_training/ui/gradio_blocks/pipeline_tab.py index 3c2f8611..b2d82f7a 100644 --- a/src/invoke_training/ui/gradio_blocks/pipeline_tab.py +++ b/src/invoke_training/ui/gradio_blocks/pipeline_tab.py @@ -140,13 +140,11 @@ def on_generate_config_button_click(self, data: dict): # (in case some values were rounded or otherwise modified # in the process). 
update_dict = self.pipeline_config_group.update_ui_components_with_config_data(self._current_config) - update_dict.update( - { - self._config_yaml: yaml.safe_dump( - self._current_config.model_dump(), default_flow_style=False, sort_keys=False - ) - } - ) + update_dict.update({ + self._config_yaml: yaml.safe_dump( + self._current_config.model_dump(), default_flow_style=False, sort_keys=False + ) + }) return update_dict except Exception as e: print(f"Error generating config: {e}") From cd59a12c5db9016b445f3367df2ad0e00927fcbc Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 14:26:14 -0400 Subject: [PATCH 08/10] fix length --- .../stable_diffusion_xl/lora_and_textual_inversion/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index dcd70c92..13c34b6a 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -85,7 +85,8 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() text_encoder_learning_rate: float | None = 1e-5 - """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate. + """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning + rate. """ unet_learning_rate: float | None = 1e-4 From 273052fd35c83a1924d3d7a268fbea3f6d03a8e4 Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 14:40:58 -0400 Subject: [PATCH 09/10] final --- .../stable_diffusion_xl/lora_and_textual_inversion/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py index 13c34b6a..a2cc6876 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora_and_textual_inversion/config.py @@ -85,7 +85,7 @@ class SdxlLoraAndTextualInversionConfig(BasePipelineConfig): optimizer: AdamOptimizerConfig | ProdigyOptimizerConfig = AdamOptimizerConfig() text_encoder_learning_rate: float | None = 1e-5 - """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning + """The learning rate to use for the text encoder model. Set to null or 0 to use the optimizer's default learning rate. 
""" From e685da2b2265104170a3cbb2b1e35d8626a1e672 Mon Sep 17 00:00:00 2001 From: Kent Keirsey Date: Tue, 14 Oct 2025 16:51:30 -0400 Subject: [PATCH 10/10] format updates --- .../stable_diffusion/textual_inversion/train.py | 12 +++++++----- .../pipelines/stable_diffusion_xl/lora/train.py | 6 +++--- .../stable_diffusion_xl/textual_inversion/train.py | 12 +++++++----- src/invoke_training/ui/gradio_blocks/pipeline_tab.py | 12 +++++++----- 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py index cbc529f2..9400d281 100644 --- a/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py +++ b/src/invoke_training/pipelines/stable_diffusion/textual_inversion/train.py @@ -90,11 +90,13 @@ def _initialize_placeholder_tokens( - Initialize the new token embeddings from either an existing token, or an initial TI embedding file. """ if ( - sum([ - config.initializer_token is not None, - config.initial_embedding_file is not None, - config.initial_phrase is not None, - ]) + sum( + [ + config.initializer_token is not None, + config.initial_embedding_file is not None, + config.initial_phrase is not None, + ] + ) != 1 ): raise ValueError( diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py b/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py index 543401a1..489e7a9e 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/lora/train.py @@ -254,9 +254,9 @@ def compute_time_ids(original_size, crops_coords_top_left): add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids - add_time_ids = torch.cat([ - compute_time_ids(s, c) for s, c in zip(data_batch["original_size_hw"], data_batch["crop_top_left_yx"]) - ]) + add_time_ids = torch.cat( + [compute_time_ids(s, c) for s, c in zip(data_batch["original_size_hw"], data_batch["crop_top_left_yx"])] + ) unet_conditions = {"time_ids": add_time_ids} # Get the text embedding for conditioning. diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py b/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py index 1c88eef7..3ddb0515 100644 --- a/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py +++ b/src/invoke_training/pipelines/stable_diffusion_xl/textual_inversion/train.py @@ -106,11 +106,13 @@ def _initialize_placeholder_tokens( """ if ( - sum([ - getattr(config, "initializer_token", None) is not None, - getattr(config, "initial_embedding_file", None) is not None, - getattr(config, "initial_phrase", None) is not None, - ]) + sum( + [ + getattr(config, "initializer_token", None) is not None, + getattr(config, "initial_embedding_file", None) is not None, + getattr(config, "initial_phrase", None) is not None, + ] + ) != 1 ): raise ValueError( diff --git a/src/invoke_training/ui/gradio_blocks/pipeline_tab.py b/src/invoke_training/ui/gradio_blocks/pipeline_tab.py index b2d82f7a..3c2f8611 100644 --- a/src/invoke_training/ui/gradio_blocks/pipeline_tab.py +++ b/src/invoke_training/ui/gradio_blocks/pipeline_tab.py @@ -140,11 +140,13 @@ def on_generate_config_button_click(self, data: dict): # (in case some values were rounded or otherwise modified # in the process). 
update_dict = self.pipeline_config_group.update_ui_components_with_config_data(self._current_config) - update_dict.update({ - self._config_yaml: yaml.safe_dump( - self._current_config.model_dump(), default_flow_style=False, sort_keys=False - ) - }) + update_dict.update( + { + self._config_yaml: yaml.safe_dump( + self._current_config.model_dump(), default_flow_style=False, sort_keys=False + ) + } + ) return update_dict except Exception as e: print(f"Error generating config: {e}")
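
Note on the pattern the patches above apply: each UI config group now maps a 0 coming from a gr.Number field to None before writing it into the pipeline config, so the generated YAML carries null and the pipeline falls back to the optimizer's default learning rate (or, for max_grad_norm, disables clipping), as described in the updated docstrings ("Set to null or 0 ..."). The sketch below is an illustration only, not part of any patch; the helper name zero_to_none and the standalone usage are hypothetical.

    def zero_to_none(value: float | None) -> float | None:
        # Same normalization as `None if value == 0 else value` in the config groups:
        # 0 means "unset", matching the updated help text ("Set to 0 or leave empty ...").
        return None if value == 0 else value

    # Example: a "Max Gradient Norm" field set to 0 becomes None, which yaml.safe_dump
    # serializes as `max_grad_norm: null` (no gradient clipping); a nonzero value passes through.
    max_grad_norm = zero_to_none(0.0)   # -> None
    unet_learning_rate = zero_to_none(1e-4)   # -> 1e-4, overrides the optimizer default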