Skip to content

Commit 18e7d6d

Browse files
authored
mm/mp: always unload re-used but modified models (#10724)
The partial unloader path in the model re-use flow skips straight to the actual unload without any check of the patching UUID. This means that if you do an upscale flow with a model patch on an existing model, it will not apply your patches. Fix by delaying the partial_unload until after the UUID checks. This is done by making partial_unload a mode of partial_load where extra_mem is negative.
1 parent e1d85e7 commit 18e7d6d

File tree

2 files changed

+4
-4
lines changed

2 files changed

+4
-4
lines changed

comfy/model_management.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -503,10 +503,7 @@ def model_load(self, lowvram_model_memory=0, force_patch_weights=False):
503503
use_more_vram = lowvram_model_memory
504504
if use_more_vram == 0:
505505
use_more_vram = 1e32
506-
if use_more_vram > 0:
507-
self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)
508-
else:
509-
self.model.partially_unload(self.model.offload_device, -use_more_vram, force_patch_weights=force_patch_weights)
506+
self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)
510507

511508
real_model = self.model.model
512509

comfy/model_patcher.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -928,6 +928,9 @@ def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):
928928
extra_memory += (used - self.model.model_loaded_weight_memory)
929929

930930
self.patch_model(load_weights=False)
931+
if extra_memory < 0 and not unpatch_weights:
932+
self.partially_unload(self.offload_device, -extra_memory, force_patch_weights=force_patch_weights)
933+
return 0
931934
full_load = False
932935
if self.model.model_lowvram == False and self.model.model_loaded_weight_memory > 0:
933936
self.apply_hooks(self.forced_hooks, force_apply=True)

0 commit comments

Comments
 (0)