comfy_extras/nodes_qwen.py (90 changes: 52 additions & 38 deletions)
@@ -1,24 +1,29 @@
import node_helpers
import comfy.utils
import math
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


class TextEncodeQwenImageEdit:
class TextEncodeQwenImageEdit(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": {
"clip": ("CLIP", ),
"prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
},
"optional": {"vae": ("VAE", ),
"image": ("IMAGE", ),}}
def define_schema(cls):
return io.Schema(
node_id="TextEncodeQwenImageEdit",
category="advanced/conditioning",
inputs=[
io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True),
io.Vae.Input("vae", optional=True),
io.Image.Input("image", optional=True),
],
outputs=[
io.Conditioning.Output(),
],
)

RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"

CATEGORY = "advanced/conditioning"

def encode(self, clip, prompt, vae=None, image=None):
@classmethod
def execute(cls, clip, prompt, vae=None, image=None) -> io.NodeOutput:
ref_latent = None
if image is None:
images = []
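
To make the new pattern easier to follow outside the diff, here is a minimal, hypothetical node written against the same schema API this hunk migrates to. It assumes only the io.* calls visible above (io.ComfyNode, io.Schema, io.NodeOutput and the typed Input/Output helpers) plus ComfyUI's existing clip.tokenize and clip.encode_from_tokens_scheduled methods; the class name and node_id are invented for illustration and are not part of this PR.

```python
# Hypothetical sketch (not part of this PR): a minimal node written against the
# schema API that this hunk migrates to. The class name and node_id are
# invented; the io.* calls mirror the ones used in the diff above.
from comfy_api.latest import io


class ExampleEchoPrompt(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleEchoPrompt",        # unique identifier for the node
            category="advanced/conditioning",   # UI category, as in the diff
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, prompt) -> io.NodeOutput:
        # Tokenize and encode the prompt, then wrap the conditioning in
        # io.NodeOutput instead of the old single-element tuple.
        tokens = clip.tokenize(prompt)
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
```
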
@@ -40,28 +45,30 @@ def encode(self, clip, prompt, vae=None, image=None):
conditioning = clip.encode_from_tokens_scheduled(tokens)
if ref_latent is not None:
conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
return (conditioning, )
return io.NodeOutput(conditioning)


class TextEncodeQwenImageEditPlus:
class TextEncodeQwenImageEditPlus(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": {
"clip": ("CLIP", ),
"prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
},
"optional": {"vae": ("VAE", ),
"image1": ("IMAGE", ),
"image2": ("IMAGE", ),
"image3": ("IMAGE", ),
}}

RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"

CATEGORY = "advanced/conditioning"

def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None):
def define_schema(cls):
return io.Schema(
node_id="TextEncodeQwenImageEditPlus",
category="advanced/conditioning",
inputs=[
io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True),
io.Vae.Input("vae", optional=True),
io.Image.Input("image1", optional=True),
io.Image.Input("image2", optional=True),
io.Image.Input("image3", optional=True),
],
outputs=[
io.Conditioning.Output(),
],
)

@classmethod
def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None) -> io.NodeOutput:
ref_latents = []
images = [image1, image2, image3]
images_vl = []
@@ -94,10 +101,17 @@ def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None):
conditioning = clip.encode_from_tokens_scheduled(tokens)
if len(ref_latents) > 0:
conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
return (conditioning, )
return io.NodeOutput(conditioning)


class QwenExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
TextEncodeQwenImageEdit,
TextEncodeQwenImageEditPlus,
]


NODE_CLASS_MAPPINGS = {
"TextEncodeQwenImageEdit": TextEncodeQwenImageEdit,
"TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus,
}
async def comfy_entrypoint() -> QwenExtension:
return QwenExtension()
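
For custom node authors, a sketch of how a separate package could register its nodes the same way, replacing the legacy NODE_CLASS_MAPPINGS dict this diff removes. It continues the hypothetical module from the sketch above (reusing its ExampleEchoPrompt class); the extension name is invented, and the comment about when the entrypoint runs is an assumption based on this PR's pattern rather than documented behavior.

```python
# Hypothetical sketch, continuing the module above (not part of this PR): how a
# standalone custom-node package could register its nodes through the async
# entrypoint instead of the legacy NODE_CLASS_MAPPINGS dict removed here.
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # Every io.ComfyNode subclass the module provides; ExampleEchoPrompt is
        # the invented node defined in the earlier sketch.
        return [ExampleEchoPrompt]


async def comfy_entrypoint() -> ExampleExtension:
    # Presumably awaited by the ComfyUI loader when it imports the module,
    # mirroring how this file now exposes QwenExtension.
    return ExampleExtension()
```
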