Update nodes.py
kijai committed Dec 9, 2024
1 parent a01b079 commit 0968d87
Showing 1 changed file with 19 additions and 4 deletions.
nodes.py — 23 changes: 19 additions & 4 deletions
@@ -940,15 +940,18 @@ def decode(self, vae, samples, enable_vae_tiling, temporal_tiling_sample_size, s
         out = out.permute(0, 2, 3, 1).cpu().float()

         return (out,)


 #region VideoEncode
 class HyVideoEncode:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {
             "vae": ("VAE",),
             "image": ("IMAGE",),
             "enable_vae_tiling": ("BOOLEAN", {"default": True, "tooltip": "Drastically reduces memory use but may introduce seams"}),
-            "temporal_tiling_sample_size": ("INT", {"default": 16, "min": 4, "max": 256, "tooltip": "Smaller values use less VRAM, model default is 64 which doesn't fit on most GPUs"}),
+            "temporal_tiling_sample_size": ("INT", {"default": 16, "min": 4, "max": 256, "tooltip": "Smaller values use less VRAM, model default is 64, any other value will cause stutter"}),
+            "spatial_tile_sample_min_size": ("INT", {"default": 256, "min": 32, "max": 2048, "step": 32, "tooltip": "Spatial tile minimum size in pixels, smaller values use less VRAM, may introduce more seams"}),
+            "auto_tile_size": ("BOOLEAN", {"default": True, "tooltip": "Automatically set tile size based on defaults, above settings are ignored"}),
             },
         }

@@ -957,13 +960,25 @@ def INPUT_TYPES(s):
     FUNCTION = "encode"
     CATEGORY = "HunyuanVideoWrapper"

-    def encode(self, vae, image, enable_vae_tiling, temporal_tiling_sample_size):
+    def encode(self, vae, image, enable_vae_tiling, temporal_tiling_sample_size, auto_tile_size, spatial_tile_sample_min_size):
         device = mm.get_torch_device()
         offload_device = mm.unet_offload_device()

         generator = torch.Generator(device=torch.device("cpu"))#.manual_seed(seed)
         vae.to(device)
-        vae.sample_tsize = temporal_tiling_sample_size
+        if not auto_tile_size:
+            vae.tile_latent_min_tsize = temporal_tiling_sample_size // 4
+            vae.tile_sample_min_size = spatial_tile_sample_min_size
+            vae.tile_latent_min_size = spatial_tile_sample_min_size // 8
+            if temporal_tiling_sample_size != 64:
+                vae.t_tile_overlap_factor = 0.0
+            else:
+                vae.t_tile_overlap_factor = 0.25
+        else:
+            #defaults
+            vae.tile_latent_min_tsize = 64
+            vae.tile_sample_min_size = 256
+            vae.tile_latent_min_size = 32

         image = (image * 2.0 - 1.0).to(vae.dtype).to(device).unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W
         if enable_vae_tiling:
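For reference, the tile-size arithmetic introduced by the manual branch can be sketched as a standalone helper. This is an illustrative reconstruction, not code from the commit: the TileConfig dataclass and resolve_tile_sizes function are hypothetical names, and the comments about 4x temporal / 8x spatial compression are inferred only from the // 4 and // 8 divisions in the diff itself.

from dataclasses import dataclass
from typing import Optional

@dataclass
class TileConfig:
    tile_latent_min_tsize: int              # temporal tile size, in latent frames
    tile_sample_min_size: int               # spatial tile size, in pixels
    tile_latent_min_size: int               # spatial tile size, in latent pixels
    t_tile_overlap_factor: Optional[float]  # None = leave the VAE's current value untouched

def resolve_tile_sizes(temporal_tiling_sample_size: int,
                       spatial_tile_sample_min_size: int,
                       auto_tile_size: bool) -> TileConfig:
    """Hypothetical helper mirroring the branch added to HyVideoEncode.encode."""
    if auto_tile_size:
        # Auto mode ignores both sliders and restores the wrapper defaults;
        # the overlap factor is not touched in this branch of the diff.
        return TileConfig(64, 256, 32, None)
    # Manual mode: latent tile sizes are derived from the pixel/frame settings
    # (temporal // 4 and spatial // 8, matching the diff above).
    return TileConfig(
        tile_latent_min_tsize=temporal_tiling_sample_size // 4,
        tile_sample_min_size=spatial_tile_sample_min_size,
        tile_latent_min_size=spatial_tile_sample_min_size // 8,
        # Temporal overlap is only applied at the model-default tile of 64 frames.
        t_tile_overlap_factor=0.25 if temporal_tiling_sample_size == 64 else 0.0,
    )

# Example: the node defaults (16 frames, 256 px) map to 4 latent frames and 32 latent pixels.
print(resolve_tile_sizes(16, 256, auto_tile_size=False))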
