Draft pull request: changes from all commits (122 commits)
4ae6c90
feat(mm): add UnknownModelConfig
psychedelicious Sep 18, 2025
b26ab0b
refactor(ui): move model categorisation-ish logic to central location…
psychedelicious Sep 18, 2025
7cc7d06
refactor(ui): more cleanup of model categories
psychedelicious Sep 18, 2025
0a3e6d3
refactor(ui): remove unused excludeSubmodels
psychedelicious Sep 18, 2025
8474fd8
feat(nodes): add unknown as model base
psychedelicious Sep 18, 2025
0f5beec
chore(ui): typegen
psychedelicious Sep 18, 2025
eb6b3b8
feat(ui): add unknown model base support in ui
psychedelicious Sep 18, 2025
6e9e8d6
feat(ui): allow changing model type in MM, fix up base and variant se…
psychedelicious Sep 18, 2025
9e509ff
feat(mm): omit model description instead of making it "base type file…
psychedelicious Sep 18, 2025
64aaf98
feat(app): add setting to allow unknown models
psychedelicious Sep 18, 2025
51b2297
feat(ui): allow changing model format in MM
psychedelicious Sep 18, 2025
62c456a
feat(app): add the installed model config to install complete events
psychedelicious Sep 18, 2025
facb026
chore(ui): typegen
psychedelicious Sep 18, 2025
93a170a
feat(ui): toast warning when installed model is unidentified
psychedelicious Sep 18, 2025
4897eeb
docs: update config docstrings
psychedelicious Sep 18, 2025
08853f9
chore(ui): typegen
psychedelicious Sep 18, 2025
7cdc821
tests(mm): fix test for MM, leave the UnknownModelConfig class in the…
psychedelicious Sep 18, 2025
ca3ccf9
tidy(ui): prefer types from zod schemas for model attrs
psychedelicious Sep 18, 2025
092cff3
chore(ui): lint
psychedelicious Sep 18, 2025
84e4d31
fix(ui): wrong translation string
psychedelicious Sep 18, 2025
6f08a2b
feat(mm): normalized model storage
psychedelicious Sep 18, 2025
6294c29
feat(mm): add migration to flat model storage
psychedelicious Sep 18, 2025
a8009b4
fix(mm): normalized multi-file/diffusers model installation no worky
psychedelicious Sep 19, 2025
e72c78f
refactor: port MM probes to new api
psychedelicious Sep 23, 2025
8036bb0
feat(mm): port TIs to new API
psychedelicious Sep 23, 2025
9469bb0
tidy(mm): remove unused probes
psychedelicious Sep 23, 2025
8a14175
feat(mm): port spandrel to new API
psychedelicious Sep 23, 2025
f852c03
fix(mm): parsing for spandrel
psychedelicious Sep 23, 2025
4ae20f4
fix(mm): loader for clip embed
psychedelicious Sep 23, 2025
73b6fae
fix(mm): tis use existing weight_files method
psychedelicious Sep 23, 2025
06dcd29
feat(mm): port vae to new API
psychedelicious Sep 23, 2025
8a6d5f4
fix(mm): vae class inheritance and config_path
psychedelicious Sep 23, 2025
4e2145c
tidy(mm): patcher types and import paths
psychedelicious Sep 23, 2025
4b1450a
feat(mm): better errors when invalid model config found in db
psychedelicious Sep 23, 2025
250163e
feat(mm): port t5 to new API
psychedelicious Sep 23, 2025
0fd5868
feat(mm): make config_path optional
psychedelicious Sep 23, 2025
8399de9
refactor(mm): simplify model classification process
psychedelicious Sep 24, 2025
fd47da6
refactor(mm): remove unused methods in config.py
psychedelicious Sep 24, 2025
3488975
refactor(mm): add model config parsing utils
psychedelicious Sep 24, 2025
d4823b6
fix(mm): abstractmethod bork
psychedelicious Sep 24, 2025
bbecc86
tidy(mm): clarify that model id utils are private
psychedelicious Sep 24, 2025
e1a54ba
fix(mm): fall back to UnknownModelConfig correctly
psychedelicious Sep 24, 2025
036ab04
feat(mm): port CLIPVisionDiffusersConfig to new api
psychedelicious Sep 24, 2025
d89472d
feat(mm): port SigLIPDiffusersConfig to new api
psychedelicious Sep 24, 2025
3b606b6
feat(mm): make match helpers more succinct
psychedelicious Sep 24, 2025
a35a49f
feat(mm): port flux redux to new api
psychedelicious Sep 24, 2025
d185b85
feat(mm): port ip adapter to new api
psychedelicious Sep 24, 2025
7ca0a0a
tidy(mm): skip optimistic override handling for now
psychedelicious Sep 24, 2025
eaddd6f
refactor(mm): continue iterating on config
psychedelicious Sep 25, 2025
a118700
feat(mm): port flux "control lora" and t2i adapter to new api
psychedelicious Sep 25, 2025
eb1ed24
tidy(ui): use Extract to get model config types
psychedelicious Sep 25, 2025
96bbd8a
fix(mm): t2i base determination
psychedelicious Sep 25, 2025
925698a
feat(mm): port cnet to new api
psychedelicious Sep 25, 2025
9745c25
refactor(mm): add config validation utils, make it all consistent and…
psychedelicious Sep 25, 2025
934b3f8
feat(mm): wip port of main models to new api
psychedelicious Sep 25, 2025
395b7d8
feat(mm): wip port of main models to new api
psychedelicious Sep 25, 2025
f5cbf60
feat(mm): wip port of main models to new api
psychedelicious Sep 25, 2025
111782d
docs(mm): add todos
psychedelicious Sep 26, 2025
044648f
tidy(mm): removed unused model merge class
psychedelicious Sep 29, 2025
951635f
feat(mm): wip port main models to new api
psychedelicious Sep 29, 2025
c53c731
tidy(mm): clean up model heuristic utils
psychedelicious Oct 1, 2025
a0a4eb9
tidy(mm): clean up ModelOnDisk caching
psychedelicious Oct 1, 2025
c065655
tidy(mm): flux lora format util
psychedelicious Oct 1, 2025
af30525
refactor(mm): make config classes narrow
psychedelicious Oct 1, 2025
ee58083
refactor(mm): diffusers loras
psychedelicious Oct 1, 2025
4ded5b5
feat(mm): consistent naming for all model config classes
psychedelicious Oct 1, 2025
e48e354
fix(mm): tag generation & scattered probe fixes
psychedelicious Oct 1, 2025
edfd90f
tidy(mm): consistent class names
psychedelicious Oct 2, 2025
9faffe9
refactor(mm): split configs into separate files
psychedelicious Oct 3, 2025
e23ac6d
docs(mm): add comments for identification utils
psychedelicious Oct 6, 2025
0214afc
chore(ui): typegen
psychedelicious Oct 6, 2025
13b2f9d
refactor(mm): remove legacy probe, new configs dir structure, update …
psychedelicious Oct 7, 2025
83fe40e
fix(mm): inverted condition
psychedelicious Oct 7, 2025
09fef01
docs(mm): update docstrings in factory.py
psychedelicious Oct 7, 2025
7437a14
docs(mm): document flux variant attr
psychedelicious Oct 7, 2025
07a667a
feat(mm): add helper method for legacy configs
psychedelicious Oct 7, 2025
233b286
feat(mm): satisfy type checker in flux denoise
psychedelicious Oct 7, 2025
74e4dd4
docs(mm): remove extraneous comment
psychedelicious Oct 7, 2025
56e31ca
fix(mm): ensure unknown model configs get unknown attrs
psychedelicious Oct 7, 2025
2e5ec1c
fix(mm): t5 identification
psychedelicious Oct 7, 2025
303acdb
fix(mm): sdxl ip adapter identification
psychedelicious Oct 7, 2025
d336aa4
feat(mm): more flexible config matching utils
psychedelicious Oct 7, 2025
2561968
fix(mm): clip vision identification
psychedelicious Oct 7, 2025
01ca74e
feat(mm): add sanity checks before probing paths
psychedelicious Oct 7, 2025
99d3f16
docs(mm): add reminder for self for field migrations
psychedelicious Oct 7, 2025
b5aa315
feat(mm): clearer naming for main config class hierarchy
psychedelicious Oct 8, 2025
26dc155
feat(mm): fix clip vision starter model bases, add ref to actual models
psychedelicious Oct 8, 2025
e676b9d
feat(mm): add model config schema migration logic
psychedelicious Oct 8, 2025
34cb88e
fix(mm): duplicate import
psychedelicious Oct 8, 2025
347a33f
refactor(mm): split big migration into 3
psychedelicious Oct 8, 2025
55d7d2e
fix(mm): pop base/type/format when creating unknown model config
psychedelicious Oct 8, 2025
f57ee30
fix(db): migration 22 insert only real cols
psychedelicious Oct 8, 2025
5a681d5
fix(db): migration 23 fall back to unknown model when config change f…
psychedelicious Oct 8, 2025
17c3d15
feat(db): run migrations 23 and 24
psychedelicious Oct 8, 2025
381827f
fix(mm): false negative on flux lora
psychedelicious Oct 9, 2025
d11cb34
fix(mm): vae checkpoint probe checking for dir instead of file
psychedelicious Oct 9, 2025
fc2175a
fix(mm): ModelOnDisk skips dirs when looking for weights
psychedelicious Oct 9, 2025
d258af0
feat(mm): add method to get main model defaults from a base
psychedelicious Oct 9, 2025
4f20a0d
feat(mm): do not log when multiple non-unknown model matches
psychedelicious Oct 9, 2025
2206b28
refactor(mm): continued iteration on model identification
psychedelicious Oct 9, 2025
a16d0b8
tests(mm): refactor model identification tests
psychedelicious Oct 9, 2025
db2a830
fix(mm): omit type/format/base when creating unknown config instance
psychedelicious Oct 9, 2025
d81a554
feat(mm): use ValueError for model id sanity checks
psychedelicious Oct 10, 2025
adc332b
feat(mm): add flag for updating models to allow class changes
psychedelicious Oct 10, 2025
e5935a3
tests(mm): fix remaining MM tests
psychedelicious Oct 10, 2025
3af42c5
feat: allow users to edit models freely
psychedelicious Oct 10, 2025
9d9625f
feat(ui): add warning for model settings edit
psychedelicious Oct 10, 2025
ab1e15e
tests(mm): flux state dict tests
psychedelicious Oct 10, 2025
396f739
tidy: remove unused file
psychedelicious Oct 10, 2025
7ff73eb
fix(mm): lora state dict loading in model id
psychedelicious Oct 10, 2025
e811ffc
feat(ui): use translation string for model edit warning
psychedelicious Oct 10, 2025
f1ba95d
docs(db): update version numbers in migration comments
psychedelicious Oct 10, 2025
975d716
chore: bump version to v6.9.0a1
psychedelicious Oct 10, 2025
bcc4735
docs: update model id readme
psychedelicious Oct 10, 2025
abbc96d
tests(mm): attempt to fix windows model id tests
psychedelicious Oct 10, 2025
bab61fb
fix(mm): issue with deleting single file models
psychedelicious Oct 10, 2025
6cea0c0
feat(mm): just delete the dir w/ rmtree when deleting model
psychedelicious Oct 10, 2025
fc10db4
tests(mm): windows CI issue
psychedelicious Oct 10, 2025
644bb98
fix(ui): typegen schema sync
psychedelicious Oct 10, 2025
e380521
fix(mm): fixes for migration 23
psychedelicious Oct 12, 2025
7296a0c
chore: bump version to v6.9.0a2
psychedelicious Oct 12, 2025
e238b26
chore: bump version to v6.9.0a3
psychedelicious Oct 14, 2025
1 change: 1 addition & 0 deletions .gitattributes
@@ -4,3 +4,4 @@
 * text=auto
 docker/** text eol=lf
 tests/test_model_probe/stripped_models/** filter=lfs diff=lfs merge=lfs -text
+tests/model_identification/stripped_models/** filter=lfs diff=lfs merge=lfs -text
30 changes: 20 additions & 10 deletions invokeai/app/api/routers/model_manager.py
@@ -28,10 +28,12 @@
     UnknownModelException,
 )
 from invokeai.app.util.suppress_output import SuppressOutput
-from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
-from invokeai.backend.model_manager.config import (
-    AnyModelConfig,
-    MainCheckpointConfig,
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
+from invokeai.backend.model_manager.configs.main import (
+    Main_Checkpoint_SD1_Config,
+    Main_Checkpoint_SD2_Config,
+    Main_Checkpoint_SDXL_Config,
+    Main_Checkpoint_SDXLRefiner_Config,
 )
 from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
@@ -44,6 +46,7 @@
     StarterModelBundle,
     StarterModelWithoutDependencies,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
 
 model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])
 
@@ -297,10 +300,8 @@ async def update_model_record(
     """Update a model's config."""
     logger = ApiDependencies.invoker.services.logger
     record_store = ApiDependencies.invoker.services.model_manager.store
-    installer = ApiDependencies.invoker.services.model_manager.install
     try:
-        record_store.update_model(key, changes=changes)
-        config = installer.sync_model_path(key)
+        config = record_store.update_model(key, changes=changes, allow_class_change=True)
         config = add_cover_image_to_model_config(config, ApiDependencies)
         logger.info(f"Updated model: {key}")
     except UnknownModelException as e:
@@ -743,9 +744,18 @@ async def convert_model(
         logger.error(str(e))
         raise HTTPException(status_code=424, detail=str(e))
 
-    if not isinstance(model_config, MainCheckpointConfig):
-        logger.error(f"The model with key {key} is not a main checkpoint model.")
-        raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.")
+    if not isinstance(
+        model_config,
+        (
+            Main_Checkpoint_SD1_Config,
+            Main_Checkpoint_SD2_Config,
+            Main_Checkpoint_SDXL_Config,
+            Main_Checkpoint_SDXLRefiner_Config,
+        ),
+    ):
+        msg = f"The model with key {key} is not a main SD 1/2/XL checkpoint model."
+        logger.error(msg)
+        raise HTTPException(400, msg)
 
     with TemporaryDirectory(dir=ApiDependencies.invoker.services.configuration.models_path) as tmpdir:
         convert_path = pathlib.Path(tmpdir) / pathlib.Path(model_config.path).stem
2 changes: 1 addition & 1 deletion invokeai/app/invocations/cogview4_denoise.py
@@ -22,7 +22,7 @@
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import CogView4ConditioningInfo
3 changes: 1 addition & 2 deletions invokeai/app/invocations/cogview4_model_loader.py
@@ -13,8 +13,7 @@
     VAEField,
 )
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import SubModelType
-from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
 
 
 @invocation_output("cogview4_model_loader_output")
11 changes: 5 additions & 6 deletions invokeai/app/invocations/create_gradient_mask.py
@@ -20,9 +20,7 @@
 from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager import LoadedModel
-from invokeai.backend.model_manager.config import MainConfigBase
-from invokeai.backend.model_manager.taxonomy import ModelVariantType
+from invokeai.backend.model_manager.taxonomy import FluxVariantType, ModelType, ModelVariantType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 
 
@@ -182,10 +180,11 @@ def invoke(self, context: InvocationContext) -> GradientMaskOutput:
         if self.unet is not None and self.vae is not None and self.image is not None:
             # all three fields must be present at the same time
             main_model_config = context.models.get_config(self.unet.unet.key)
-            assert isinstance(main_model_config, MainConfigBase)
-            if main_model_config.variant is ModelVariantType.Inpaint:
+            assert main_model_config.type is ModelType.Main
+            variant = getattr(main_model_config, "variant", None)
+            if variant is ModelVariantType.Inpaint or variant is FluxVariantType.DevFill:
                 mask = dilated_mask_tensor
-            vae_info: LoadedModel = context.models.load(self.vae.vae)
+            vae_info = context.models.load(self.vae.vae)
             image = context.images.get_pil(self.image.image_name)
             image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
             if image_tensor.dim() == 3:
2 changes: 1 addition & 1 deletion invokeai/app/invocations/denoise_latents.py
@@ -39,7 +39,7 @@
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelVariantType
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.patches.layer_patcher import LayerPatcher
7 changes: 4 additions & 3 deletions invokeai/app/invocations/flux_denoise.py
@@ -48,7 +48,7 @@
     unpack,
 )
 from invokeai.backend.flux.text_conditioning import FluxReduxConditioning, FluxTextConditioning
-from invokeai.backend.model_manager.taxonomy import ModelFormat, ModelVariantType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, FluxVariantType, ModelFormat, ModelType
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -232,7 +232,8 @@ def _run_diffusion(
         )
 
         transformer_config = context.models.get_config(self.transformer.transformer)
-        is_schnell = "schnell" in getattr(transformer_config, "config_path", "")
+        assert transformer_config.base is BaseModelType.Flux and transformer_config.type is ModelType.Main
+        is_schnell = transformer_config.variant is FluxVariantType.Schnell
 
         # Calculate the timestep schedule.
         timesteps = get_schedule(
@@ -277,7 +278,7 @@
 
         # Prepare the extra image conditioning tensor (img_cond) for either FLUX structural control or FLUX Fill.
         img_cond: torch.Tensor | None = None
-        is_flux_fill = transformer_config.variant == ModelVariantType.Inpaint  # type: ignore
+        is_flux_fill = transformer_config.variant is FluxVariantType.DevFill
         if is_flux_fill:
             img_cond = self._prep_flux_fill_img_cond(
                 context, device=TorchDevice.choose_torch_device(), dtype=inference_dtype
7 changes: 2 additions & 5 deletions invokeai/app/invocations/flux_ip_adapter.py
@@ -16,10 +16,7 @@
 from invokeai.app.invocations.primitives import ImageField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import (
-    IPAdapterCheckpointConfig,
-    IPAdapterInvokeAIConfig,
-)
+from invokeai.backend.model_manager.configs.ip_adapter import IPAdapter_Checkpoint_FLUX_Config
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
@@ -68,7 +65,7 @@ def validate_begin_end_step_percent(self) -> Self:
     def invoke(self, context: InvocationContext) -> IPAdapterOutput:
         # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
         ip_adapter_info = context.models.get_config(self.ip_adapter_model.key)
-        assert isinstance(ip_adapter_info, (IPAdapterInvokeAIConfig, IPAdapterCheckpointConfig))
+        assert isinstance(ip_adapter_info, IPAdapter_Checkpoint_FLUX_Config)
 
         # Note: There is a IPAdapterInvokeAIConfig.image_encoder_model_id field, but it isn't trustworthy.
         image_encoder_starter_model = CLIP_VISION_MODEL_MAP[self.clip_vision_model]
10 changes: 4 additions & 6 deletions invokeai/app/invocations/flux_model_loader.py
@@ -13,10 +13,8 @@
     preprocess_t5_encoder_model_identifier,
     preprocess_t5_tokenizer_model_identifier,
 )
-from invokeai.backend.flux.util import max_seq_lengths
-from invokeai.backend.model_manager.config import (
-    CheckpointConfigBase,
-)
+from invokeai.backend.flux.util import get_flux_max_seq_length
+from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
 
 
@@ -87,12 +85,12 @@ def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
         t5_encoder = preprocess_t5_encoder_model_identifier(self.t5_encoder_model)
 
         transformer_config = context.models.get_config(transformer)
-        assert isinstance(transformer_config, CheckpointConfigBase)
+        assert isinstance(transformer_config, Checkpoint_Config_Base)
 
         return FluxModelLoaderOutput(
             transformer=TransformerField(transformer=transformer, loras=[]),
             clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
             t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder, loras=[]),
             vae=VAEField(vae=vae),
-            max_seq_len=max_seq_lengths[transformer_config.config_path],
+            max_seq_len=get_flux_max_seq_length(transformer_config.variant),
        )
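Note on the `max_seq_len` change above: the lookup is no longer keyed on `config_path` (which this PR makes optional) but on the FLUX variant. A minimal sketch of what `get_flux_max_seq_length` might look like, assuming the conventional FLUX T5 sequence lengths (256 for schnell, 512 for the dev family); the actual body in `invokeai.backend.flux.util` may differ:

from invokeai.backend.model_manager.taxonomy import FluxVariantType

def get_flux_max_seq_length(variant: FluxVariantType) -> int:
    # Assumption: schnell was distilled with a 256-token T5 context; dev,
    # dev-fill and other dev-family variants use 512 tokens.
    if variant is FluxVariantType.Schnell:
        return 256
    return 512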
4 changes: 2 additions & 2 deletions invokeai/app/invocations/flux_redux.py
@@ -24,9 +24,9 @@
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
-from invokeai.backend.model_manager import BaseModelType, ModelType
-from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
 from invokeai.backend.model_manager.starter_models import siglip
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
 from invokeai.backend.util.devices import TorchDevice
 
2 changes: 1 addition & 1 deletion invokeai/app/invocations/flux_text_encoder.py
@@ -17,7 +17,7 @@
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.conditioner import HFEncoder
-from invokeai.backend.model_manager import ModelFormat
+from invokeai.backend.model_manager.taxonomy import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2 changes: 1 addition & 1 deletion invokeai/app/invocations/flux_vae_encode.py
@@ -12,7 +12,7 @@
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.autoencoder import AutoEncoder
-from invokeai.backend.model_manager import LoadedModel
+from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux
2 changes: 1 addition & 1 deletion invokeai/app/invocations/image_to_latents.py
@@ -23,7 +23,7 @@
 from invokeai.app.invocations.model import VAEField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager import LoadedModel
+from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
 from invokeai.backend.util.devices import TorchDevice
12 changes: 6 additions & 6 deletions invokeai/app/invocations/ip_adapter.py
@@ -11,10 +11,10 @@
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import (
-    AnyModelConfig,
-    IPAdapterCheckpointConfig,
-    IPAdapterInvokeAIConfig,
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
+from invokeai.backend.model_manager.configs.ip_adapter import (
+    IPAdapter_Checkpoint_Config_Base,
+    IPAdapter_InvokeAI_Config_Base,
 )
 from invokeai.backend.model_manager.starter_models import (
     StarterModel,
@@ -123,9 +123,9 @@ def validate_begin_end_step_percent(self) -> Self:
     def invoke(self, context: InvocationContext) -> IPAdapterOutput:
         # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
         ip_adapter_info = context.models.get_config(self.ip_adapter_model.key)
-        assert isinstance(ip_adapter_info, (IPAdapterInvokeAIConfig, IPAdapterCheckpointConfig))
+        assert isinstance(ip_adapter_info, (IPAdapter_InvokeAI_Config_Base, IPAdapter_Checkpoint_Config_Base))
 
-        if isinstance(ip_adapter_info, IPAdapterInvokeAIConfig):
+        if isinstance(ip_adapter_info, IPAdapter_InvokeAI_Config_Base):
             image_encoder_model_id = ip_adapter_info.image_encoder_model_id
             image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip()
         else:
9 changes: 4 additions & 5 deletions invokeai/app/invocations/model.py
@@ -12,9 +12,7 @@
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
-from invokeai.backend.model_manager.config import (
-    AnyModelConfig,
-)
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
 
 
@@ -24,8 +22,9 @@ class ModelIdentifierField(BaseModel):
     name: str = Field(description="The model's name")
     base: BaseModelType = Field(description="The model's base model type")
     type: ModelType = Field(description="The model's type")
-    submodel_type: Optional[SubModelType] = Field(
-        description="The submodel to load, if this is a main model", default=None
+    submodel_type: SubModelType | None = Field(
+        description="The submodel to load, if this is a main model",
+        default=None,
     )
 
     @classmethod
2 changes: 1 addition & 1 deletion invokeai/app/invocations/sd3_denoise.py
@@ -23,7 +23,7 @@
 from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
-from invokeai.backend.model_manager import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo
2 changes: 2 additions & 0 deletions invokeai/app/services/config/config_default.py
@@ -108,6 +108,7 @@ class InvokeAIAppConfig(BaseSettings):
         remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
         scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
         unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
+        allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
     """
 
     _root: Optional[Path] = PrivateAttr(default=None)
@@ -198,6 +199,7 @@ class InvokeAIAppConfig(BaseSettings):
     remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
     scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
     unsafe_disable_picklescan: bool = Field(default=False, description="UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.")
+    allow_unknown_models: bool = Field(default=True, description="Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.")
 
     # fmt: on
 
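The new `allow_unknown_models` field is an ordinary pydantic-settings field, so it can be toggled in `invokeai.yaml` or via environment variable like any other option. A hedged sketch of the gating it enables during install; the `ModelType.Unknown` member is an assumption based on this PR's `UnknownModelConfig` work, and the function below is illustrative rather than the actual installer code:

from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.model_manager.taxonomy import ModelType

def check_install_allowed(app_config: InvokeAIAppConfig, model_type: ModelType) -> None:
    # Illustrative only. Assumes ModelType gained an Unknown member in this PR.
    if model_type is ModelType.Unknown and not app_config.allow_unknown_models:
        raise ValueError("Model could not be identified and allow_unknown_models is disabled")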
4 changes: 2 additions & 2 deletions invokeai/app/services/events/events_base.py
@@ -44,8 +44,8 @@
     SessionQueueItem,
     SessionQueueStatus,
 )
-from invokeai.backend.model_manager import SubModelType
-from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
+from invokeai.backend.model_manager.taxonomy import SubModelType
 
 
 class EventServiceBase:
13 changes: 10 additions & 3 deletions invokeai/app/services/events/events_common.py
@@ -16,8 +16,8 @@
 )
 from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput
 from invokeai.app.util.misc import get_timestamp
-from invokeai.backend.model_manager import SubModelType
-from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
+from invokeai.backend.model_manager.taxonomy import SubModelType
 
 if TYPE_CHECKING:
     from invokeai.app.services.download.download_base import DownloadJob
@@ -546,11 +546,18 @@ class ModelInstallCompleteEvent(ModelEventBase):
     source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
     key: str = Field(description="Model config record key")
     total_bytes: Optional[int] = Field(description="Size of the model (may be None for installation of a local path)")
+    config: AnyModelConfig = Field(description="The installed model's config")
 
     @classmethod
     def build(cls, job: "ModelInstallJob") -> "ModelInstallCompleteEvent":
         assert job.config_out is not None
-        return cls(id=job.id, source=job.source, key=(job.config_out.key), total_bytes=job.total_bytes)
+        return cls(
+            id=job.id,
+            source=job.source,
+            key=(job.config_out.key),
+            total_bytes=job.total_bytes,
+            config=job.config_out,
+        )
 
 
 @payload_schema.register
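With the event now carrying the full `AnyModelConfig`, subscribers can inspect the installed model without a follow-up records query by `key`; this is what enables the "toast warning when installed model is unidentified" commit. A hedged sketch of a consumer, leaving out the registration mechanism since the real event bus API is not shown in this diff:

from invokeai.app.services.events.events_common import ModelInstallCompleteEvent

def on_model_install_complete(event: ModelInstallCompleteEvent) -> None:
    config = event.config  # the full installed config, new in this PR
    # Assumption: taxonomy enums are string-valued, so "unknown" compares cleanly.
    if config.type == "unknown":
        print(f"Model '{config.name}' was installed but could not be identified.")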