Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
40 commits
Select commit Hold shift + click to select a range
8c5d45e
Correct html head and switch litegraph to pointer mode.
Zentropivity Mar 16, 2023
68e9cf7
Merge branch 'comfyanonymous:master' into mobile-support
Zentropivity Mar 16, 2023
963b68b
Remove mobile client-side zoom
Zentropivity Mar 17, 2023
e9206a5
Fix double click handling by not using faulty is_primary check
Zentropivity Mar 23, 2023
3ebf745
nodes: add ImagePadForOutpaint
guoyk93 Mar 23, 2023
7a7e328
Added support for converting widgets to inputs (and back)
pythongosssss Mar 23, 2023
bb00176
Remove debugger
pythongosssss Mar 23, 2023
4f24e6a
Allow output folder to be a symbolic link
jn-jairo Mar 24, 2023
b13539c
Sync widget changes
pythongosssss Mar 24, 2023
89fd5ed
Added MPS device support
GaidamakUA Mar 24, 2023
4b943d2
Removed unnecessary comment
GaidamakUA Mar 24, 2023
4580f3e
nodes: add feathering to ImagePadForOutpaint
guoyk93 Mar 24, 2023
7f0fd99
Make ddim work with --cpu
comfyanonymous Mar 24, 2023
97198f5
Restore original size after add/remove input
pythongosssss Mar 24, 2023
1fa9cca
Set title to widget name
pythongosssss Mar 24, 2023
fc71e7e
Fixed typo
GaidamakUA Mar 24, 2023
3c6ff88
Merge branch 'master' of https://github.com/GaidamakUA/ComfyUI
comfyanonymous Mar 24, 2023
4461ddc
Change close on mouseleave to false
Zentropivity Mar 24, 2023
4adcea7
I don't think controlnets were being handled correctly by MPS.
comfyanonymous Mar 24, 2023
d6830b9
Prevent exactly overlapping nodes
pythongosssss Mar 24, 2023
5c03b2f
Merge branch 'mobile-support' of https://github.com/Zentropivity/ComfyUI
comfyanonymous Mar 24, 2023
edb3dea
Merge branch 'widget-inputs' of https://github.com/pythongosssss/ComfyUI
comfyanonymous Mar 25, 2023
44b6eaa
Don't completely break workflow if field name changes.
comfyanonymous Mar 25, 2023
bfe64dc
Add support for Google Drive mount feature in Colab Notebook (#193)
ltdrdata Mar 25, 2023
4c01386
nodes: ImagePadForOutpaint: fix feathering, ignore edges not expanded
guoyk93 Mar 25, 2023
3b87302
nodes: ImagePadForOutpaint: clean imports
guoyk93 Mar 25, 2023
d741e7a
Merge branch 'feat-add-image-pad-for-outpaint' of https://github.com/…
comfyanonymous Mar 25, 2023
a30526a
Set default value of feathering to 40.
comfyanonymous Mar 25, 2023
070b574
Slot colors by type
jn-jairo Mar 25, 2023
58695f9
Fix seed being impossible to increment/decrement by 1.
comfyanonymous Mar 25, 2023
8d66827
Merge branch 'slot-color' of https://github.com/jn-jairo/ComfyUI
comfyanonymous Mar 25, 2023
bff0c40
Update screenshot.
comfyanonymous Mar 25, 2023
e651be5
Merge branch 'allow-symbolic-link' of https://github.com/jn-jairo/Com…
comfyanonymous Mar 25, 2023
a5c78a5
Draggable menu
jn-jairo Mar 25, 2023
9287770
Allow negative Lora strengths.
comfyanonymous Mar 26, 2023
48d4edb
Prevent draggable menu to get outside of window
jn-jairo Mar 26, 2023
980379b
change colors to a more consistent and pleasing palette
Mar 26, 2023
754553f
Merge branch 'aesthetic-colors' of https://github.com/throwaway-mezzo…
comfyanonymous Mar 26, 2023
d68350e
Merge branch 'draggable-menu' of https://github.com/jn-jairo/ComfyUI
comfyanonymous Mar 26, 2023
f5365c9
Fix ddim for Mac: #264
comfyanonymous Mar 26, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion comfy/ldm/models/diffusion/ddim.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwar
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != self.device:
attr = attr.to(self.device)
attr = attr.float().to(self.device)
setattr(self, name, attr)

def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
Expand Down
26 changes: 22 additions & 4 deletions comfy/model_management.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
LOW_VRAM = 2
NORMAL_VRAM = 3
HIGH_VRAM = 4
MPS = 5

accelerate_enabled = False
vram_state = NORMAL_VRAM
Expand Down Expand Up @@ -76,10 +77,16 @@
total_vram_available_mb = (total_vram - 1024) // 2
total_vram_available_mb = int(max(256, total_vram_available_mb))

try:
if torch.backends.mps.is_available():
vram_state = MPS
except:
pass

if "--cpu" in sys.argv:
vram_state = CPU

print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state])
print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state])


current_loaded_model = None
Expand Down Expand Up @@ -128,6 +135,10 @@ def load_model_gpu(model):
current_loaded_model = model
if vram_state == CPU:
pass
elif vram_state == MPS:
mps_device = torch.device("mps")
real_model.to(mps_device)
pass
elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM:
model_accelerated = False
real_model.cuda()
Expand Down Expand Up @@ -155,9 +166,10 @@ def load_controlnet_gpu(models):
if m not in models:
m.cpu()

device = get_torch_device()
current_gpu_controlnets = []
for m in models:
current_gpu_controlnets.append(m.cuda())
current_gpu_controlnets.append(m.to(device))


def load_if_low_vram(model):
Expand All @@ -173,6 +185,8 @@ def unload_if_low_vram(model):
return model

def get_torch_device():
if vram_state == MPS:
return torch.device("mps")
if vram_state == CPU:
return torch.device("cpu")
else:
Expand All @@ -195,7 +209,7 @@ def get_free_memory(dev=None, torch_free_too=False):
if dev is None:
dev = get_torch_device()

if hasattr(dev, 'type') and dev.type == 'cpu':
if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
mem_free_total = psutil.virtual_memory().available
mem_free_torch = mem_free_total
else:
Expand Down Expand Up @@ -224,8 +238,12 @@ def cpu_mode():
global vram_state
return vram_state == CPU

def mps_mode():
global vram_state
return vram_state == MPS

def should_use_fp16():
if cpu_mode():
if cpu_mode() or mps_mode():
return False #TODO ?

if torch.cuda.is_bf16_supported():
Expand Down
2 changes: 1 addition & 1 deletion comfy/samplers.py
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N
noise_mask = None
if denoise_mask is not None:
noise_mask = 1.0 - denoise_mask
sampler = DDIMSampler(self.model)
sampler = DDIMSampler(self.model, device=self.device)
sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise)
samples, _ = sampler.sample_custom(ddim_timesteps=timesteps,
Expand Down
Binary file modified comfyui_screenshot.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
70 changes: 67 additions & 3 deletions nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,8 +241,8 @@ def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"clip": ("CLIP", ),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL", "CLIP")
FUNCTION = "load_lora"
Expand Down Expand Up @@ -752,7 +752,7 @@ def map_filename(filename):

full_output_folder = os.path.join(self.output_dir, subfolder)

if os.path.commonpath((self.output_dir, os.path.realpath(full_output_folder))) != self.output_dir:
if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
print("Saving image outside the output folder is not allowed.")
return {}

Expand Down Expand Up @@ -908,6 +908,69 @@ def invert(self, image):
return (s,)


class ImagePadForOutpaint:
    """Pad an image with blank space for outpainting.

    Returns the padded image plus a mask the sampler can inpaint against:
    the mask is 1.0 over the newly added padding, 0.0 over the original
    image, with an optional squared-falloff feathered transition along the
    padded edges so the outpainted region blends into the original.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        # image layout is (batch, height, width, channels); see the slicing below.
        d1, d2, d3, d4 = image.size()

        # Padded image: zeros everywhere, original pasted at its offset.
        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        # Mask starts fully "to be painted" (1.0); the original-image region
        # is overwritten with `t` (0.0, or the feathering ramp) below.
        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        # Only feather when the ramp fits twice into each dimension, so
        # opposite-edge ramps cannot overlap.
        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
            # Vectorized distance-to-padded-edge computation (replaces a
            # per-pixel Python loop, which was O(H*W) interpreter iterations).
            # Row/column index vectors shaped for broadcasting: (d2,1) and (1,d3).
            i = torch.arange(d2, dtype=torch.float32).unsqueeze(1)
            j = torch.arange(d3, dtype=torch.float32).unsqueeze(0)

            # Distance to each edge; an un-padded edge contributes a distance
            # too large to matter (d2/d3 > feathering by the guard above).
            dist_top = i if top != 0 else torch.full_like(i, d2)
            dist_bottom = (d2 - i) if bottom != 0 else torch.full_like(i, d2)
            dist_left = j if left != 0 else torch.full_like(j, d3)
            dist_right = (d3 - j) if right != 0 else torch.full_like(j, d3)

            # Nearest padded edge per pixel; broadcasts (d2,1) x (1,d3) -> (d2,d3).
            d = torch.minimum(
                torch.minimum(dist_top, dist_bottom),
                torch.minimum(dist_left, dist_right),
            )

            # Squared falloff: v = (feathering - d) / feathering for d < feathering,
            # 0 elsewhere (clamp makes d >= feathering yield exactly 0.0).
            v = ((feathering - d) / feathering).clamp(min=0.0)
            t = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
"KSampler": KSampler,
"CheckpointLoader": CheckpointLoader,
Expand All @@ -926,6 +989,7 @@ def invert(self, image):
"LoadImageMask": LoadImageMask,
"ImageScale": ImageScale,
"ImageInvert": ImageInvert,
"ImagePadForOutpaint": ImagePadForOutpaint,
"ConditioningCombine": ConditioningCombine,
"ConditioningSetArea": ConditioningSetArea,
"KSamplerAdvanced": KSamplerAdvanced,
Expand Down
Loading