diff --git a/invokeai/app/api/routers/app_info.py b/invokeai/app/api/routers/app_info.py
index 8f954254efd..5d66c2559ec 100644
--- a/invokeai/app/api/routers/app_info.py
+++ b/invokeai/app/api/routers/app_info.py
@@ -1,8 +1,7 @@
import typing
from enum import Enum
-from importlib.metadata import PackageNotFoundError, version
+from importlib.metadata import distributions
from pathlib import Path
-from platform import python_version
from typing import Optional
import torch
@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
-class AppDependencyVersions(BaseModel):
- """App depencency Versions Response"""
-
- accelerate: str = Field(description="accelerate version")
- compel: str = Field(description="compel version")
- cuda: Optional[str] = Field(description="CUDA version")
- diffusers: str = Field(description="diffusers version")
- numpy: str = Field(description="Numpy version")
- opencv: str = Field(description="OpenCV version")
- onnx: str = Field(description="ONNX version")
- pillow: str = Field(description="Pillow (PIL) version")
- python: str = Field(description="Python version")
- torch: str = Field(description="PyTorch version")
- torchvision: str = Field(description="PyTorch Vision version")
- transformers: str = Field(description="transformers version")
- xformers: Optional[str] = Field(description="xformers version")
-
-
class AppConfig(BaseModel):
"""App Config Response"""
@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
return AppVersion(version=__version__)
-@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
-async def get_app_deps() -> AppDependencyVersions:
+@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
+async def get_app_deps() -> dict[str, str]:
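+ # Report the version of every installed distribution, plus the CUDA runtime version when available.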
+ deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
try:
- xformers = version("xformers")
- except PackageNotFoundError:
- xformers = None
- return AppDependencyVersions(
- accelerate=version("accelerate"),
- compel=version("compel"),
- cuda=torch.version.cuda,
- diffusers=version("diffusers"),
- numpy=version("numpy"),
- opencv=version("opencv-python"),
- onnx=version("onnx"),
- pillow=version("pillow"),
- python=python_version(),
- torch=torch.version.__version__,
- torchvision=version("torchvision"),
- transformers=version("transformers"),
- xformers=xformers,
- )
+ cuda = torch.version.cuda or "N/A"
+ except Exception:
+ cuda = "N/A"
+
+ deps["CUDA"] = cuda
+
+ sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
+
+ return sorted_deps
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py
index e760d9d09f0..f65cafc7318 100644
--- a/invokeai/app/api/routers/images.py
+++ b/invokeai/app/api/routers/images.py
@@ -99,7 +99,9 @@ async def upload_image(
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
try:
- np_image = pil_to_np(pil_image)
+ # heuristic_resize_fast expects an RGB or RGBA image
+ pil_rgba = pil_image.convert("RGBA")
+ np_image = pil_to_np(pil_rgba)
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
pil_image = np_to_pil(np_image)
except Exception:
diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py
index 22b77748cf2..2c467ca6a50 100644
--- a/invokeai/app/api_app.py
+++ b/invokeai/app/api_app.py
@@ -158,7 +158,7 @@ def overridden_redoc() -> HTMLResponse:
try:
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
except RuntimeError:
- logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
+ logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
app.mount(
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
) # docs favicon is in here
diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index 3e4ec0f4d67..65993acf015 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
ui_type = field.json_schema_extra.get("ui_type", None)
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
- logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
+ logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
field.json_schema_extra.pop("ui_type")
return None
@@ -613,7 +613,7 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
uiconfig["version"] = version
else:
- logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
+ logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
uiconfig["version"] = "1.0.0"
cls.UIConfig = UIConfigBase(**uiconfig)
diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index 36c9d7b6522..3ad05bcc9b9 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -114,6 +114,13 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
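+ # Drop local references to the tokenizer and text encoder objects so they are not kept alive longer than needed.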
+ del compel
+ del patched_tokenizer
+ del tokenizer
+ del ti_manager
+ del text_encoder
+ del text_encoder_info
+
c = c.detach().to("cpu")
conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
@@ -222,7 +229,10 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
else:
c_pooled = None
+ del compel
+ del patched_tokenizer
del tokenizer
+ del ti_manager
del text_encoder
del text_encoder_info
diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py
index 29234f4d791..417aa8e1d17 100644
--- a/invokeai/app/invocations/fields.py
+++ b/invokeai/app/invocations/fields.py
@@ -437,7 +437,7 @@ class WithWorkflow:
workflow = None
def __init_subclass__(cls) -> None:
- logger.warn(
+ logger.warning(
f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
)
super().__init_subclass__()
@@ -578,7 +578,7 @@ def InputField(
if default_factory is not _Unset and default_factory is not None:
default = default_factory()
- logger.warn('"default_factory" is not supported, calling it now to set "default"')
+ logger.warning('"default_factory" is not supported, calling it now to set "default"')
# These are the args we may wish pass to the pydantic `Field()` function
field_args = {
diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py
index ec63021c698..4dabac964b8 100644
--- a/invokeai/app/services/config/config_default.py
+++ b/invokeai/app/services/config/config_default.py
@@ -24,7 +24,6 @@
INIT_FILE = Path("invokeai.yaml")
DB_FILE = Path("invokeai.db")
LEGACY_INIT_FILE = Path("invokeai.init")
-DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
- device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+ device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system. Valid values: `auto`, `float16`, `bfloat16`, `float32`
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
attention_type: Attention type. Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
# DEVICE
- device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
+ device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
# GENERATION
diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py
index 99f3aee4dac..23674e14e6d 100644
--- a/invokeai/app/services/image_records/image_records_sqlite.py
+++ b/invokeai/app/services/image_records/image_records_sqlite.py
@@ -196,9 +196,13 @@ def get_many(
# Search term condition
if search_term:
query_conditions += """--sql
- AND images.metadata LIKE ?
+ AND (
+ images.metadata LIKE ?
+ OR images.created_at LIKE ?
+ )
"""
query_params.append(f"%{search_term.lower()}%")
+ query_params.append(f"%{search_term.lower()}%")
if starred_first:
query_pagination = f"""--sql
diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py
index 70f40e907c5..1489a7ce454 100644
--- a/invokeai/app/services/images/images_default.py
+++ b/invokeai/app/services/images/images_default.py
@@ -78,7 +78,7 @@ def create(
board_id=board_id, image_name=image_name
)
except Exception as e:
- self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
+ self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
self.__invoker.services.image_files.save(
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
)
diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py
index c49a1cacdb5..f999dd00f1a 100644
--- a/invokeai/app/services/model_install/model_install_default.py
+++ b/invokeai/app/services/model_install/model_install_default.py
@@ -148,7 +148,7 @@ def stop(self, invoker: Optional[Invoker] = None) -> None:
def _clear_pending_jobs(self) -> None:
for job in self.list_jobs():
if not job.in_terminal_state:
- self._logger.warning("Cancelling job {job.id}")
+ self._logger.warning(f"Cancelling job {job.id}")
self.cancel_job(job)
while True:
try:
diff --git a/invokeai/app/services/session_processor/session_processor_default.py b/invokeai/app/services/session_processor/session_processor_default.py
index 07b1bacfc48..6c320eabda5 100644
--- a/invokeai/app/services/session_processor/session_processor_default.py
+++ b/invokeai/app/services/session_processor/session_processor_default.py
@@ -1,3 +1,4 @@
+import gc
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Thread
@@ -439,6 +440,12 @@ def _process(
poll_now_event.wait(self._polling_interval)
continue
+ # GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
+ # Most queue items take seconds to execute, so the relative cost of a GC is very small.
+ # Python rarely cedes allocated memory back to the OS, so anything we can do to reduce the peak
+ # allocation is well worth it.
+ gc.collect()
+
self._invoker.services.logger.info(
f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
)
diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py
index 9949f29f082..32494413296 100644
--- a/invokeai/app/services/session_queue/session_queue_sqlite.py
+++ b/invokeai/app/services/session_queue/session_queue_sqlite.py
@@ -104,11 +104,7 @@ def _get_highest_priority(self, queue_id: str) -> int:
return cast(Union[int, None], cursor.fetchone()[0]) or 0
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
- return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
-
- def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
try:
- cursor = self._conn.cursor()
# TODO: how does this work in a multi-user scenario?
current_queue_size = self._get_current_queue_size(queue_id)
max_queue_size = self.__invoker.services.configuration.max_queue_size
@@ -118,8 +114,12 @@ def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueB
if prepend:
priority = self._get_highest_priority(queue_id) + 1
- requested_count = calc_session_count(batch)
- values_to_insert = prepare_values_to_insert(
+ requested_count = await asyncio.to_thread(
+ calc_session_count,
+ batch=batch,
+ )
+ values_to_insert = await asyncio.to_thread(
+ prepare_values_to_insert,
queue_id=queue_id,
batch=batch,
priority=priority,
@@ -127,19 +127,16 @@ def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueB
)
enqueued_count = len(values_to_insert)
- if requested_count > enqueued_count:
- values_to_insert = values_to_insert[:max_new_queue_items]
-
- cursor.executemany(
- """--sql
- INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- """,
- values_to_insert,
- )
- self._conn.commit()
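+ # Using the connection as a context manager commits on success and rolls back if the block raises.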
+ with self._conn:
+ cursor = self._conn.cursor()
+ cursor.executemany(
+ """--sql
+ INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+ """,
+ values_to_insert,
+ )
except Exception:
- self._conn.rollback()
raise
enqueue_result = EnqueueBatchResult(
queue_id=queue_id,
diff --git a/invokeai/backend/ip_adapter/README.md b/invokeai/backend/ip_adapter/README.md
index c85acae4982..7ac845e5346 100644
--- a/invokeai/backend/ip_adapter/README.md
+++ b/invokeai/backend/ip_adapter/README.md
@@ -42,4 +42,5 @@ IP-Adapters:
- [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
- [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
- [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
-- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
\ No newline at end of file
+- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
+- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)
\ No newline at end of file
diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index f4c057cb5b7..f312aeffb67 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -296,7 +296,7 @@ def flux_lora_format(cls, mod: ModelOnDisk):
from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict
sd = mod.load_state_dict(mod.path)
- value = flux_format_from_state_dict(sd)
+ value = flux_format_from_state_dict(sd, mod.metadata())
mod.cache[key] = value
return value
diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py
index 071713316a1..076919a14fe 100644
--- a/invokeai/backend/model_manager/load/model_loaders/lora.py
+++ b/invokeai/backend/model_manager/load/model_loaders/lora.py
@@ -20,6 +20,10 @@
ModelType,
SubModelType,
)
+from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
+ is_state_dict_likely_in_flux_aitoolkit_format,
+ lora_model_from_flux_aitoolkit_state_dict,
+)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
is_state_dict_likely_flux_control,
lora_model_from_flux_control_state_dict,
@@ -92,6 +96,8 @@ def _load_model(
model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
elif is_state_dict_likely_flux_control(state_dict=state_dict):
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
+ elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
+ model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
else:
diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index ca585883429..b96f79249dd 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -297,6 +297,15 @@ class StarterModelBundles(BaseModel):
dependencies=[ip_adapter_sdxl_image_encoder],
previous_names=["IP Adapter SDXL"],
)
+ip_adapter_plus_sdxl = StarterModel(
+ name="Precise Reference (IP Adapter Plus ViT-H)",
+ base=BaseModelType.StableDiffusionXL,
+ source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
+ description="References images with a higher degree of precision.",
+ type=ModelType.IPAdapter,
+ dependencies=[ip_adapter_sdxl_image_encoder],
+ previous_names=["IP Adapter Plus SDXL"],
+)
ip_adapter_flux = StarterModel(
name="Standard Reference (XLabs FLUX IP-Adapter v2)",
base=BaseModelType.Flux,
@@ -672,6 +681,7 @@ class StarterModelBundles(BaseModel):
ip_adapter_plus_sd1,
ip_adapter_plus_face_sd1,
ip_adapter_sdxl,
+ ip_adapter_plus_sdxl,
ip_adapter_flux,
qr_code_cnet_sd1,
qr_code_cnet_sdxl,
@@ -744,6 +754,7 @@ class StarterModelBundles(BaseModel):
juggernaut_sdxl,
sdxl_fp16_vae_fix,
ip_adapter_sdxl,
+ ip_adapter_plus_sdxl,
canny_sdxl,
depth_sdxl,
softedge_sdxl,
diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py
index 37c1197cc47..e009b203e14 100644
--- a/invokeai/backend/model_manager/taxonomy.py
+++ b/invokeai/backend/model_manager/taxonomy.py
@@ -137,6 +137,7 @@ class FluxLoRAFormat(str, Enum):
Kohya = "flux.kohya"
OneTrainer = "flux.onetrainer"
Control = "flux.control"
+ AIToolkit = "flux.aitoolkit"
AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
diff --git a/invokeai/backend/model_patcher.py b/invokeai/backend/model_patcher.py
index 3d614077c1f..a1d8bbed0a5 100644
--- a/invokeai/backend/model_patcher.py
+++ b/invokeai/backend/model_patcher.py
@@ -46,6 +46,10 @@ def apply_ti(
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
ti_list: List[Tuple[str, TextualInversionModelRaw]],
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
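+ # With no textual inversions to apply, skip patching and yield the tokenizer with a fresh TextualInversionManager.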
+ if len(ti_list) == 0:
+ yield tokenizer, TextualInversionManager(tokenizer)
+ return
+
init_tokens_count = None
new_tokens_added = None
diff --git a/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py
new file mode 100644
index 00000000000..6ca06a0355f
--- /dev/null
+++ b/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py
@@ -0,0 +1,63 @@
+import json
+from dataclasses import dataclass, field
+from typing import Any
+
+import torch
+
+from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
+from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
+from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
+from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
+from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
+from invokeai.backend.util import InvokeAILogger
+
+
+def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] | None = None) -> bool:
+ if metadata:
+ try:
+ software = json.loads(metadata.get("software", "{}"))
+ except json.JSONDecodeError:
+ return False
+ return software.get("name") == "ai-toolkit"
+ # No usable metadata was provided; fall back to inspecting the state dict keys.
+ return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())
+
+
+@dataclass
+class GroupedStateDict:
+ transformer: dict[str, Any] = field(default_factory=dict)
+ # might also grow CLIP and T5 submodels
+
+
+def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
+ logger = InvokeAILogger.get_logger()
+ grouped = GroupedStateDict()
+ for key, value in state_dict.items():
+ submodel_name, param_name = key.split(".", 1)
+ match submodel_name:
+ case "diffusion_model":
+ grouped.transformer[param_name] = value
+ case _:
+ logger.warning(f"Unexpected submodel name: {submodel_name}")
+ return grouped
+
+
+def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
+ """Renames keys from the PEFT LoRA format to the InvokeAI format."""
+ renamed_state_dict = {}
+ for key, value in state_dict.items():
+ renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
+ renamed_state_dict[renamed_key] = value
+ return renamed_state_dict
+
+
+def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
+ state_dict = _rename_peft_lora_keys(state_dict)
+ by_layer = _group_by_layer(state_dict)
+ by_model = _group_state_by_submodel(by_layer)
+
+ layers: dict[str, BaseLayerPatch] = {}
+ for layer_key, layer_state_dict in by_model.transformer.items():
+ layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
+
+ return ModelPatchRaw(layers=layers)
diff --git a/invokeai/backend/patches/lora_conversions/formats.py b/invokeai/backend/patches/lora_conversions/formats.py
index 46073100679..94f71e05ee6 100644
--- a/invokeai/backend/patches/lora_conversions/formats.py
+++ b/invokeai/backend/patches/lora_conversions/formats.py
@@ -1,4 +1,7 @@
from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
+from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
+ is_state_dict_likely_in_flux_aitoolkit_format,
+)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
is_state_dict_likely_in_flux_diffusers_format,
@@ -11,7 +14,7 @@
)
-def flux_format_from_state_dict(state_dict):
+def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
if is_state_dict_likely_in_flux_kohya_format(state_dict):
return FluxLoRAFormat.Kohya
elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):
@@ -20,5 +23,7 @@ def flux_format_from_state_dict(state_dict):
return FluxLoRAFormat.Diffusers
elif is_state_dict_likely_flux_control(state_dict):
return FluxLoRAFormat.Control
+ elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
+ return FluxLoRAFormat.AIToolkit
else:
return None
diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json
index 0a9a99d3b98..3d1afd62ae6 100644
--- a/invokeai/frontend/web/package.json
+++ b/invokeai/frontend/web/package.json
@@ -68,7 +68,7 @@
"cmdk": "^1.1.1",
"compare-versions": "^6.1.1",
"filesize": "^10.1.6",
- "fracturedjsonjs": "^4.0.2",
+ "fracturedjsonjs": "^4.1.0",
"framer-motion": "^11.10.0",
"i18next": "^25.0.1",
"i18next-http-backend": "^3.0.2",
diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml
index 939856deb1c..bb5722c9345 100644
--- a/invokeai/frontend/web/pnpm-lock.yaml
+++ b/invokeai/frontend/web/pnpm-lock.yaml
@@ -54,8 +54,8 @@ dependencies:
specifier: ^10.1.6
version: 10.1.6
fracturedjsonjs:
- specifier: ^4.0.2
- version: 4.0.2
+ specifier: ^4.1.0
+ version: 4.1.0
framer-motion:
specifier: ^11.10.0
version: 11.10.0(react-dom@18.3.1)(react@18.3.1)
@@ -5280,8 +5280,8 @@ packages:
signal-exit: 4.1.0
dev: true
- /fracturedjsonjs@4.0.2:
- resolution: {integrity: sha512-+vGJH9wK0EEhbbn50V2sOebLRaar1VL3EXr02kxchIwpkhQk0ItrPjIOtYPYuU9hNFpVzxjrPgzjtMJih+ae4A==}
+ /fracturedjsonjs@4.1.0:
+ resolution: {integrity: sha512-qy6LPA8OOiiyRHt5/sNKDayD7h5r3uHmHxSOLbBsgtU/hkt5vOVWOR51MdfDbeCNfj7k/dKCRbXYm8FBAJcgWQ==}
dev: false
/framer-motion@10.18.0(react-dom@18.3.1)(react@18.3.1):
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
index c8d3f576876..0bd9637fea6 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/DataViewer.tsx
@@ -2,7 +2,7 @@ import type { FlexProps } from '@invoke-ai/ui-library';
import { Box, chakra, Flex, IconButton, Tooltip, useShiftModifier } from '@invoke-ai/ui-library';
import { getOverlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
import { useClipboard } from 'common/hooks/useClipboard';
-import { Formatter } from 'fracturedjsonjs';
+import { Formatter, TableCommaPlacement } from 'fracturedjsonjs';
import { isString } from 'lodash-es';
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
import type { CSSProperties } from 'react';
@@ -11,6 +11,8 @@ import { useTranslation } from 'react-i18next';
import { PiCopyBold, PiDownloadSimpleBold } from 'react-icons/pi';
const formatter = new Formatter();
+formatter.Options.TableCommaPlacement = TableCommaPlacement.BeforePadding;
+formatter.Options.OmitTrailingWhitespace = true;
type Props = {
label: string;
@@ -19,6 +21,7 @@ type Props = {
withDownload?: boolean;
withCopy?: boolean;
extraCopyActions?: { label: string; getData: (data: unknown) => unknown }[];
+ wrapData?: boolean;
} & FlexProps;
const overlayscrollbarsOptions = getOverlayScrollbarsParams({
@@ -29,7 +32,16 @@ const overlayscrollbarsOptions = getOverlayScrollbarsParams({
const ChakraPre = chakra('pre');
const DataViewer = (props: Props) => {
- const { label, data, fileName, withDownload = true, withCopy = true, extraCopyActions, ...rest } = props;
+ const {
+ label,
+ data,
+ fileName,
+ withDownload = true,
+ withCopy = true,
+ extraCopyActions,
+ wrapData = true,
+ ...rest
+ } = props;
const dataString = useMemo(() => (isString(data) ? data : formatter.Serialize(data)) ?? '', [data]);
const shift = useShiftModifier();
const clipboard = useClipboard();
@@ -53,7 +65,7 @@ const DataViewer = (props: Props) => {
- {dataString}
+ {dataString}
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx
index 73f24b73512..b2de652ff88 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/PublishWorkflowPanelContent.tsx
@@ -22,6 +22,7 @@ import { NodeFieldElementOverlay } from 'features/nodes/components/sidePanel/bui
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
import {
$isInPublishFlow,
+ $isPublishing,
$isReadyToDoValidationRun,
$isSelectingOutputNode,
$outputNodeId,
@@ -183,13 +184,14 @@ SelectOutputNodeButton.displayName = 'SelectOutputNodeButton';
const CancelPublishButton = memo(() => {
const { t } = useTranslation();
+ const isPublishing = useStore($isPublishing);
const onClick = useCallback(() => {
$isInPublishFlow.set(false);
$isSelectingOutputNode.set(false);
$outputNodeId.set(null);
}, []);
return (
- } onClick={onClick}>
+ } onClick={onClick} isDisabled={isPublishing}>
{t('common.cancel')}
);
@@ -198,6 +200,7 @@ CancelPublishButton.displayName = 'CancelDeployButton';
const PublishWorkflowButton = memo(() => {
const { t } = useTranslation();
+ const isPublishing = useStore($isPublishing);
const isReadyToDoValidationRun = useStore($isReadyToDoValidationRun);
const isReadyToEnqueue = useStore($isReadyToEnqueue);
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
@@ -211,6 +214,7 @@ const PublishWorkflowButton = memo(() => {
const enqueue = useEnqueueWorkflows();
const onClick = useCallback(async () => {
+ $isPublishing.set(true);
const result = await withResultAsync(() => enqueue(true, true));
if (result.isErr()) {
toast({
@@ -244,8 +248,30 @@ const PublishWorkflowButton = memo(() => {
});
log.debug(parseify(result.value), 'Enqueued batch');
}
+ $isPublishing.set(false);
}, [enqueue, projectUrl, t]);
+ const isDisabled = useMemo(() => {
+ return (
+ !allowPublishWorkflows ||
+ !isReadyToEnqueue ||
+ doesWorkflowHaveUnsavedChanges ||
+ hasUnpublishableNodes ||
+ !isReadyToDoValidationRun ||
+ !(outputNodeId !== null && !isSelectingOutputNode) ||
+ isPublishing
+ );
+ }, [
+ allowPublishWorkflows,
+ doesWorkflowHaveUnsavedChanges,
+ hasUnpublishableNodes,
+ isReadyToDoValidationRun,
+ isReadyToEnqueue,
+ isSelectingOutputNode,
+ outputNodeId,
+ isPublishing,
+ ]);
+
return (
{
hasPublishableInputs={inputs.publishable.length > 0}
hasUnpublishableInputs={inputs.unpublishable.length > 0}
>
- }
- isDisabled={
- !allowPublishWorkflows ||
- !isReadyToEnqueue ||
- doesWorkflowHaveUnsavedChanges ||
- hasUnpublishableNodes ||
- !isReadyToDoValidationRun ||
- !(outputNodeId !== null && !isSelectingOutputNode)
- }
- onClick={onClick}
- >
- {t('workflows.builder.publish')}
+ } isDisabled={isDisabled} onClick={onClick}>
+ {isPublishing ? t('workflows.builder.publishing') : t('workflows.builder.publish')}
);
@@ -337,6 +352,10 @@ export const StartPublishFlowButton = memo(() => {
$isInPublishFlow.set(true);
}, []);
+ const isDisabled = useMemo(() => {
+ return !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes;
+ }, [allowPublishWorkflows, doesWorkflowHaveUnsavedChanges, hasUnpublishableNodes, isReadyToEnqueue]);
+
return (
{
hasPublishableInputs={inputs.publishable.length > 0}
hasUnpublishableInputs={inputs.unpublishable.length > 0}
>
- }
- variant="ghost"
- size="sm"
- isDisabled={
- !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes
- }
- >
+ } variant="ghost" size="sm" isDisabled={isDisabled}>
{t('workflows.builder.publish')}
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
index cf5b6fb2c10..ba880994931 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
@@ -19,6 +19,7 @@ import { useGetBatchStatusQuery } from 'services/api/endpoints/queue';
import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
import { assert } from 'tsafe';
+export const $isPublishing = atom(false);
export const $isInPublishFlow = atom(false);
export const $outputNodeId = atom(null);
export const $isSelectingOutputNode = atom(false);
diff --git a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
index 028eb7291e0..bb2eb14a687 100644
--- a/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
+++ b/invokeai/frontend/web/src/features/system/components/AboutModal/AboutModal.tsx
@@ -58,7 +58,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
{cloneElement(children, {
onClick: onOpen,
})}
-
+
{t('accessibility.about')}
@@ -66,7 +66,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
-
+
diff --git a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
index 27ed885726f..ebcde2840c0 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/appInfo.ts
@@ -1,7 +1,7 @@
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import type { OpenAPIV3_1 } from 'openapi-types';
import type { paths } from 'services/api/schema';
-import type { AppConfig, AppDependencyVersions, AppVersion } from 'services/api/types';
+import type { AppConfig, AppVersion } from 'services/api/types';
import { api, buildV1Url } from '..';
@@ -22,7 +22,10 @@ export const appInfoApi = api.injectEndpoints({
}),
providesTags: ['FetchOnReconnect'],
}),
- getAppDeps: build.query({
+ getAppDeps: build.query<
+ paths['/api/v1/app/app_deps']['get']['responses']['200']['content']['application/json'],
+ void
+ >({
query: () => ({
url: buildAppInfoUrl('app_deps'),
method: 'GET',
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 08ce8488448..1b866765965 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -1925,77 +1925,6 @@ export type components = {
*/
watermarking_methods: string[];
};
- /**
- * AppDependencyVersions
- * @description App depencency Versions Response
- */
- AppDependencyVersions: {
- /**
- * Accelerate
- * @description accelerate version
- */
- accelerate: string;
- /**
- * Compel
- * @description compel version
- */
- compel: string;
- /**
- * Cuda
- * @description CUDA version
- */
- cuda: string | null;
- /**
- * Diffusers
- * @description diffusers version
- */
- diffusers: string;
- /**
- * Numpy
- * @description Numpy version
- */
- numpy: string;
- /**
- * Opencv
- * @description OpenCV version
- */
- opencv: string;
- /**
- * Onnx
- * @description ONNX version
- */
- onnx: string;
- /**
- * Pillow
- * @description Pillow (PIL) version
- */
- pillow: string;
- /**
- * Python
- * @description Python version
- */
- python: string;
- /**
- * Torch
- * @description PyTorch version
- */
- torch: string;
- /**
- * Torchvision
- * @description PyTorch Vision version
- */
- torchvision: string;
- /**
- * Transformers
- * @description transformers version
- */
- transformers: string;
- /**
- * Xformers
- * @description xformers version
- */
- xformers: string | null;
- };
/**
* AppVersion
* @description App Version Response
@@ -12062,7 +11991,7 @@ export type components = {
* vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
* lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
* pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
- * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+ * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
* precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system. Valid values: `auto`, `float16`, `bfloat16`, `float32`
* sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
* attention_type: Attention type. Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -12337,11 +12266,10 @@ export type components = {
pytorch_cuda_alloc_conf?: string | null;
/**
* Device
- * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
+ * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
* @default auto
- * @enum {string}
*/
- device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
+ device?: string;
/**
* Precision
* @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
@@ -24226,7 +24154,9 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["AppDependencyVersions"];
+ "application/json": {
+ [key: string]: string;
+ };
};
};
};
diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts
index b0f7d000885..14882f43e52 100644
--- a/invokeai/frontend/web/src/services/api/types.ts
+++ b/invokeai/frontend/web/src/services/api/types.ts
@@ -31,7 +31,6 @@ export type InvocationJSONSchemaExtra = S['UIConfigBase'];
// App Info
export type AppVersion = S['AppVersion'];
export type AppConfig = S['AppConfig'];
-export type AppDependencyVersions = S['AppDependencyVersions'];
// Images
export type ImageDTO = S['ImageDTO'];
diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py
index 51551062c0a..b1eaa2d447d 100644
--- a/invokeai/version/invokeai_version.py
+++ b/invokeai/version/invokeai_version.py
@@ -1 +1 @@
-__version__ = "5.14.0"
+__version__ = "5.15.0"
diff --git a/pyproject.toml b/pyproject.toml
index 5a05b1f0c07..69f6a4150fa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -109,6 +109,12 @@ dependencies = [
"humanize==4.12.1",
]
+[tool.uv]
+# Prevent opencv-python from ever being chosen during dependency resolution.
+# This prevents conflicts with opencv-contrib-python, which Invoke requires.
+override-dependencies = ["opencv-python; sys_platform=='never'"]
+
+
[project.scripts]
"invokeai-web" = "invokeai.app.run_app:run_app"
diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/flux_lora_aitoolkit_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/flux_lora_aitoolkit_format.py
new file mode 100644
index 00000000000..98b278df869
--- /dev/null
+++ b/tests/backend/patches/lora_conversions/lora_state_dicts/flux_lora_aitoolkit_format.py
@@ -0,0 +1,458 @@
+state_dict_keys = {
+ "diffusion_model.double_blocks.0.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.0.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.0.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.0.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.0.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.0.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.0.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.0.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.0.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.0.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.1.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.1.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.1.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.1.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.1.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.1.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.1.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.1.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.1.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.1.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.1.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.10.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.10.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.10.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.10.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.10.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.10.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.10.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.10.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.10.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.10.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.10.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.11.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.11.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.11.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.11.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.11.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.11.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.11.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.11.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.11.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.11.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.11.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.12.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.12.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.12.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.12.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.12.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.12.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.12.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.12.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.12.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.12.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.12.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.13.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.13.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.13.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.13.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.13.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.13.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.13.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.13.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.13.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.13.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.13.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.14.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.14.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.14.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.14.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.14.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.14.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.14.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.14.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.14.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.14.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.14.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.15.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.15.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.15.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.15.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.15.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.15.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.15.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.15.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.15.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.15.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.15.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.16.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.16.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.16.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.16.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.16.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.16.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.16.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.16.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.16.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.16.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.16.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.17.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.17.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.17.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.17.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.17.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.17.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.17.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.17.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.17.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.17.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.17.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.18.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.18.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.18.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.18.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.18.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.18.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.18.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.18.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.18.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.18.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.18.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.2.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.2.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.2.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.2.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.2.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.2.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.2.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.2.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.2.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.2.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.2.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.3.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.3.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.3.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.3.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.3.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.3.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.3.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.3.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.3.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.3.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.3.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.4.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.4.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.4.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.4.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.4.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.4.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.4.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.4.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.4.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.4.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.4.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.5.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.5.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.5.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.5.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.5.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.5.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.5.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.5.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.5.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.5.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.5.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.6.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.6.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.6.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.6.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.6.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.6.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.6.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.6.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.6.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.6.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.6.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.7.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.7.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.7.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.7.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.7.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.7.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.7.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.7.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.7.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.7.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.7.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.8.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.8.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.8.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.8.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.8.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.8.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.8.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.8.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.8.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.8.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.8.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.9.img_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.img_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.9.img_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.img_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.9.img_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.img_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.9.img_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.9.img_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.9.txt_attn.proj.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.txt_attn.proj.lora_B.weight": [3072, 16],
+ "diffusion_model.double_blocks.9.txt_attn.qkv.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.txt_attn.qkv.lora_B.weight": [9216, 16],
+ "diffusion_model.double_blocks.9.txt_mlp.0.lora_A.weight": [16, 3072],
+ "diffusion_model.double_blocks.9.txt_mlp.0.lora_B.weight": [12288, 16],
+ "diffusion_model.double_blocks.9.txt_mlp.2.lora_A.weight": [16, 12288],
+ "diffusion_model.double_blocks.9.txt_mlp.2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.0.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.0.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.0.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.0.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.1.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.1.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.1.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.1.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.10.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.10.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.10.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.10.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.11.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.11.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.11.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.11.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.12.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.12.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.12.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.12.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.13.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.13.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.13.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.13.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.14.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.14.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.14.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.14.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.15.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.15.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.15.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.15.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.16.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.16.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.16.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.16.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.17.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.17.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.17.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.17.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.18.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.18.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.18.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.18.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.19.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.19.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.19.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.19.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.2.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.2.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.2.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.2.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.20.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.20.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.20.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.20.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.21.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.21.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.21.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.21.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.22.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.22.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.22.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.22.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.23.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.23.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.23.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.23.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.24.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.24.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.24.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.24.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.25.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.25.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.25.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.25.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.26.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.26.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.26.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.26.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.27.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.27.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.27.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.27.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.28.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.28.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.28.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.28.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.29.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.29.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.29.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.29.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.3.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.3.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.3.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.3.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.30.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.30.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.30.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.30.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.31.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.31.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.31.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.31.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.32.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.32.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.32.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.32.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.33.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.33.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.33.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.33.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.34.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.34.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.34.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.34.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.35.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.35.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.35.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.35.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.36.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.36.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.36.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.36.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.37.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.37.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.37.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.37.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.4.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.4.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.4.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.4.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.5.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.5.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.5.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.5.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.6.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.6.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.6.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.6.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.7.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.7.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.7.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.7.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.8.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.8.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.8.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.8.linear2.lora_B.weight": [3072, 16],
+ "diffusion_model.single_blocks.9.linear1.lora_A.weight": [16, 3072],
+ "diffusion_model.single_blocks.9.linear1.lora_B.weight": [21504, 16],
+ "diffusion_model.single_blocks.9.linear2.lora_A.weight": [16, 15360],
+ "diffusion_model.single_blocks.9.linear2.lora_B.weight": [3072, 16],
+}
diff --git a/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py
new file mode 100644
index 00000000000..ed3e05a9b26
--- /dev/null
+++ b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py
@@ -0,0 +1,59 @@
+import accelerate
+import pytest
+
+from invokeai.backend.flux.model import Flux
+from invokeai.backend.flux.util import params
+from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
+ _group_state_by_submodel,
+ is_state_dict_likely_in_flux_aitoolkit_format,
+ lora_model_from_flux_aitoolkit_state_dict,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.flux_dora_onetrainer_format import (
+ state_dict_keys as flux_onetrainer_state_dict_keys,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_aitoolkit_format import (
+ state_dict_keys as flux_aitoolkit_state_dict_keys,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_diffusers_format import (
+ state_dict_keys as flux_diffusers_state_dict_keys,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.utils import keys_to_mock_state_dict
+
+
+def test_is_state_dict_likely_in_flux_aitoolkit_format():
+ state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
+ assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict)
+
+
+@pytest.mark.parametrize("sd_keys", [flux_diffusers_state_dict_keys, flux_onetrainer_state_dict_keys])
+def test_is_state_dict_likely_in_flux_aitoolkit_format_false(sd_keys: dict[str, list[int]]):
+ state_dict = keys_to_mock_state_dict(sd_keys)
+ assert not is_state_dict_likely_in_flux_aitoolkit_format(state_dict)
+
+
+def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format():
+ state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
+ converted_state_dict = _group_state_by_submodel(state_dict).transformer
+
+ # Extract the prefixes from the converted state dict (without the lora suffixes)
+ converted_key_prefixes: list[str] = []
+ for k in converted_state_dict.keys():
+ k = k.replace(".lora_A.weight", "")
+ k = k.replace(".lora_B.weight", "")
+ converted_key_prefixes.append(k)
+
+ # Initialize a FLUX model on the meta device.
+ with accelerate.init_empty_weights():
+ model = Flux(params["flux-schnell"])
+ model_keys = set(model.state_dict().keys())
+
+ for converted_key_prefix in converted_key_prefixes:
+ assert any(model_key.startswith(converted_key_prefix) for model_key in model_keys), (
+ f"'{converted_key_prefix}' did not match any model keys."
+ )
+
+
+def test_lora_model_from_flux_aitoolkit_state_dict():
+ state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
+
+ assert lora_model_from_flux_aitoolkit_state_dict(state_dict)
diff --git a/tests/backend/util/test_devices.py b/tests/backend/util/test_devices.py
index 8e810e43678..b65137c08dc 100644
--- a/tests/backend/util/test_devices.py
+++ b/tests/backend/util/test_devices.py
@@ -10,7 +10,7 @@
from invokeai.app.services.config import get_config
from invokeai.backend.util.devices import TorchDevice, choose_precision, choose_torch_device, torch_dtype
-devices = ["cpu", "cuda:0", "cuda:1", "mps"]
+devices = ["cpu", "cuda:0", "cuda:1", "cuda:2", "mps"]
device_types_cpu = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float32)]
device_types_cuda = [("cpu", torch.float32), ("cuda:0", torch.float16), ("mps", torch.float32)]
device_types_mps = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float16)]
diff --git a/uv.lock b/uv.lock
index a64a7590936..035dd0330d0 100644
--- a/uv.lock
+++ b/uv.lock
@@ -13,6 +13,9 @@ resolution-markers = [
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
+[manifest]
+overrides = [{ name = "opencv-python", marker = "sys_platform == 'never'" }]
+
[[package]]
name = "absl-py"
version = "2.2.1"
@@ -948,7 +951,7 @@ version = "0.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
- { name = "opencv-python" },
+ { name = "opencv-python", marker = "sys_platform == 'never'" },
{ name = "pillow" },
{ name = "pywavelets" },
{ name = "torch" },
@@ -2043,17 +2046,9 @@ name = "opencv-python"
version = "4.9.0.80"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "numpy" },
+ { name = "numpy", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/25/72/da7c69a3542071bf1e8f65336721b8b2659194425438d988f79bc14ed9cc/opencv-python-4.9.0.80.tar.gz", hash = "sha256:1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1", size = 92896686 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/35/69/b657974ddcbba54d59d7d62b01e60a8b815e35f415b996e4d355be0ac7b4/opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb", size = 55689340 },
- { url = "https://files.pythonhosted.org/packages/77/df/b56175c3fb5bc058774bdcf35f5a71cf9c3c5b909f98a1c688eb71cd3b1f/opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3", size = 35354525 },
- { url = "https://files.pythonhosted.org/packages/52/00/2adf376707c7965bb4569f28f73fafe303c404d01047b10e3b52761be086/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a", size = 41289855 },
- { url = "https://files.pythonhosted.org/packages/d9/64/7fdfb9386511cd6805451e012c537073a79a958a58795c4e602e538c388c/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57", size = 62208946 },
- { url = "https://files.pythonhosted.org/packages/76/9e/db1c2d56c04b97981c06663384f45f28950a73d9acf840c4006d60d0a1ff/opencv_python-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:dcf000c36dd1651118a2462257e3a9e76db789a78432e1f303c7bac54f63ef6c", size = 28546907 },
- { url = "https://files.pythonhosted.org/packages/c7/ec/9dabb6a9abfdebb3c45b0cc52dec901caafef2b2c7e7d6a839ed86d81e91/opencv_python-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:3f16f08e02b2a2da44259c7cc712e779eff1dd8b55fdb0323e8cab09548086c0", size = 38624911 },
-]
[[package]]
name = "opt-einsum"