Commit: Testing DeepFloydIF
MMqd committed Jul 23, 2023
1 parent 84e21d4 commit 92ab7ea
Showing 5 changed files with 158 additions and 10 deletions.
17 changes: 12 additions & 5 deletions scripts/abstract_model.py
@@ -103,13 +103,15 @@ def truncate_string(string, max_length=images.max_filename_part_length, encoding

 class AbstractModel():
     attention_type = 'auto'#'max'
-    cache_dir = os.path.join(os.path.join(script_path, 'models'), "Kandinsky")
     cond_stage_key = "edit"
     sd_checkpoint_info = KandinskyCheckpointInfo()
     sd_model_hash = sd_checkpoint_info.shorthash
     cached_image_embeds = {"settings": {}, "embeds": (None, None)}

-    def load_pipeline(self, pipe_name: str, pipeline: DiffusionPipeline, pretrained_model_name_or_path):
+    def __init__(self):
+        self.cache_dir = os.path.join(os.path.join(script_path, 'models'), '')
+
+    def load_pipeline(self, pipe_name: str, pipeline: DiffusionPipeline, pretrained_model_name_or_path, move_to_cuda = True):
         pipe = getattr(self, pipe_name, None)

         if not isinstance(pipe, pipeline) or pipe is None:
@@ -118,13 +120,18 @@ def load_pipeline(self, pipe_name: str, pipeline: DiffusionPipeline, pretrained_
             gc.collect()
             devices.torch_gc()
             pipe = pipeline.from_pretrained(pretrained_model_name_or_path, variant="fp16", torch_dtype=torch.float16, cache_dir=self.cache_dir)#, scheduler=dpm)
-            pipe.to("cuda")
+            if move_to_cuda:
+                pipe.to("cuda")
+            else:
+                pipe.enable_sequential_cpu_offload()
             #pipe.enable_sequential_cpu_offload()
             pipe.enable_attention_slicing(self.attention_type)
             #pipe.unet.to(memory_format=torch.channels_last)
             setattr(self, pipe_name, pipe)
-        else:
+        elif move_to_cuda:
             pipe.to("cuda")
+        else:
+            pipe.enable_sequential_cpu_offload()

         return pipe
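Note (not part of the commit): the new move_to_cuda flag chooses between keeping the whole pipeline resident on the GPU and diffusers' sequential CPU offload, which streams submodules to the GPU on demand to lower VRAM use at some speed cost. A minimal standalone sketch of the two paths, assuming a CUDA build of PyTorch and that the DeepFloyd stage-I weights referenced elsewhere in this commit are available locally:

import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0",
                                  variant="fp16", torch_dtype=torch.float16)

move_to_cuda = False  # hypothetical toggle mirroring load_pipeline's new parameter
if move_to_cuda:
    pipe.to("cuda")                       # whole pipeline on the GPU: fast, high VRAM
else:
    pipe.enable_sequential_cpu_offload()  # stream submodules to the GPU as needed: slower, low VRAM
pipe.enable_attention_slicing("auto")     # same slicing the base class enables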

@@ -222,7 +229,7 @@ def process_images(self, p: StableDiffusionProcessing) -> Processed:


         prior_settings_dict = {"generator": generators, "prompt": p.prompt, "guidance_scale": p.prior_cfg_scale}
-        prior_settings_dict["num_inference_steps"] = p.inference_steps
+        prior_settings_dict["num_inference_steps"] = p.prior_inference_steps

         if p.negative_prompt != "":
             prior_settings_dict["negative_prompt"] = p.negative_prompt
59 changes: 59 additions & 0 deletions scripts/deepfloydif.py
@@ -0,0 +1,59 @@
from modules import errors
try:
    from diffusers import IFPipeline
except ImportError as e:
    errors.print_error_explanation('RESTART AUTOMATIC1111 COMPLETELY TO FINISH INSTALLING PACKAGES FOR kandinsky-for-automatic1111')


import os
import gc
import torch
import numpy as np
from PIL import Image, ImageOps, ImageFilter
from packaging import version
from modules import processing, shared, script_callbacks, images, devices, scripts, masking, sd_models, generation_parameters_copypaste, sd_vae#, sd_samplers
from modules.processing import Processed, StableDiffusionProcessing
from modules.shared import opts, state
from modules.sd_models import CheckpointInfo
from modules.paths_internal import script_path

import sys
sys.path.append('extensions/kandinsky-for-automatic1111/scripts')
from abstract_model import AbstractModel
#import pdb

class IFModel(AbstractModel):
    pipe = None

    def __init__(self):
        self.cache_dir = os.path.join(os.path.join(script_path, 'models'), 'IF')

    def load_encoder(self):
        pass

    def run_encoder(self, prior_settings_dict):
        if prior_settings_dict.get("negative_prompt", None) is None:
            tup = self.pipe.encode_prompt(prompt=prior_settings_dict["prompt"])
        else:
            tup = self.pipe.encode_prompt(prompt=prior_settings_dict["prompt"], negative_prompt=prior_settings_dict["negative_prompt"])
        return tup.to_tuple()

    def encoder_to_cpu(self):
        pass

    def main_model_to_cpu(self):
        pass

    def cleanup_on_error(self):
        pass

    def txt2img(self, p, generation_parameters, b):
        self.pipe = self.load_pipeline("pipe", IFPipeline, "DeepFloyd/IF-I-XL-v1.0")
        result_images = self.pipe(**generation_parameters, num_images_per_prompt=p.batch_size).images
        return result_images

    def img2img(self, p, generation_parameters, b):
        pass

    def inpaint(self, p, generation_parameters, b):
        pass
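Note (not part of the commit): IFModel.run_encoder wraps IFPipeline.encode_prompt, and txt2img forwards the resulting embeddings to the stage-I pipeline via generation_parameters. A minimal standalone sketch of that flow using diffusers directly; the prompt and step/guidance values are illustrative, not taken from the extension:

import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0",
                                  variant="fp16", torch_dtype=torch.float16)
pipe.enable_sequential_cpu_offload()

# Encode once, then pass the embeddings instead of the raw prompt.
prompt_embeds, negative_embeds = pipe.encode_prompt("a watercolor painting of a red panda")
images = pipe(prompt_embeds=prompt_embeds,
              negative_prompt_embeds=negative_embeds,
              num_inference_steps=50,
              guidance_scale=7.0,
              num_images_per_prompt=1).images
images[0].save("if_stage1.png")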
79 changes: 79 additions & 0 deletions scripts/deepfloydif_script.py
@@ -0,0 +1,79 @@
import sys
import torch
import gradio as gr
from modules import processing, shared, script_callbacks, scripts
from modules.processing import Processed
#import pkg_resources
#import pdb

sys.path.append('extensions/kandinsky-for-automatic1111/scripts')
from deepfloydif import *

def unload_model():
    if shared.sd_model is None:
        shared.sd_model = IFModel()
        print("Unloaded Stable Diffusion model")
        return

    if not isinstance(shared.sd_model, IFModel):
        sd_models.unload_model_weights()
        sd_vae.clear_loaded_vae()
        devices.torch_gc()
        gc.collect()
        torch.cuda.empty_cache()
        shared.sd_model = IFModel()

def reload_model():
    if shared.sd_model is None or isinstance(shared.sd_model, IFModel):
        shared.sd_model = None
        sd_models.reload_model_weights()
        devices.torch_gc()
        gc.collect()
        torch.cuda.empty_cache()

def unload_if_model():
    if shared.if_model.pipe is not None:
        del shared.if_model.pipe
        devices.torch_gc()
        gc.collect()
        torch.cuda.empty_cache()

    if shared.if_model is not None:
        del shared.if_model

    print("Unloaded IF model")

class Script(scripts.Script):
    def title(self):
        return "IF"

    def ui(self, is_img2img):
        gr.Markdown("To save VRAM unload the Stable Diffusion Model")

        unload_sd_model = gr.Button("Unload Stable Diffusion Model")
        unload_sd_model.click(unload_model)
        reload_sd_model = gr.Button("Reload Stable Diffusion Model")
        reload_sd_model.click(reload_model)

        unload_k_model = gr.Button("Unload IF Model")
        unload_k_model.click(unload_if_model)
        with gr.Row():
            unload_sd_model
            reload_sd_model
            unload_k_model

        inputs = []

        return inputs

    def run(self, p) -> Processed:
        p.sampler_name = "DDPM"
        p.init_image = getattr(p, 'init_images', None)
        p.extra_generation_params["Script"] = self.title()

        shared.if_model = getattr(shared, 'if_model', None)

        if shared.if_model is None:
            shared.if_model = IFModel()

        return shared.if_model.process_images(p)
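Note (not part of the commit): unload_model and unload_if_model rely on the same VRAM-freeing pattern, dropping the last Python reference to the model, forcing a garbage-collection pass, and releasing PyTorch's cached CUDA blocks. A tiny standalone sketch of that pattern, assuming a CUDA-enabled PyTorch install; the Linear layer is just a stand-in for a loaded pipeline:

import gc
import torch

big_model = torch.nn.Linear(4096, 4096).to("cuda")  # stand-in for a loaded pipeline

del big_model              # drop the last reference so the tensors become collectable
gc.collect()               # collect any reference cycles still holding GPU tensors
torch.cuda.empty_cache()   # hand PyTorch's cached CUDA memory back to the driver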
3 changes: 3 additions & 0 deletions scripts/kandinsky.py
@@ -26,6 +26,9 @@ class KandinskyModel(AbstractModel):
     pipe = None
     pipe_prior = None

+    def __init__(self):
+        self.cache_dir = os.path.join(os.path.join(script_path, 'models'), 'Kandinsky')
+
     def mix_images(self, p, generation_parameters, b, result_images):
         if p.extra_image != [] and p.extra_image is not None:
             img2 = Image.fromarray(p.extra_image)
10 changes: 5 additions & 5 deletions scripts/kandinsky_script.py
@@ -67,27 +67,27 @@ def ui(self, is_img2img):
             unload_sd_model
             reload_sd_model
             unload_k_model
-        inference_steps = gr.inputs.Slider(minimum=2, maximum=1024, step=1, label="Prior Inference Steps", default=128)
+        prior_inference_steps = gr.inputs.Slider(minimum=2, maximum=1024, step=1, label="Prior Inference Steps", default=128)
         prior_cfg_scale = gr.inputs.Slider(minimum=1, maximum=20, step=0.5, label="Prior CFG Scale", default=4)

         with gr.Accordion("Image Mixing", open=False):
             img1_strength = gr.inputs.Slider(minimum=-2, maximum=2, label="Interpolate Image 1 Strength", default=0.5)
             img2_strength = gr.inputs.Slider(minimum=-2, maximum=2, label="Interpolate Image 2 Strength (image below)", default=0.5)
             extra_image = gr.inputs.Image()

-        inputs = [extra_image, inference_steps, prior_cfg_scale, img1_strength, img2_strength]
+        inputs = [extra_image, prior_inference_steps, prior_cfg_scale, img1_strength, img2_strength]

         return inputs

-    def run(self, p, extra_image, inference_steps, prior_cfg_scale, img1_strength, img2_strength) -> Processed:
+    def run(self, p, extra_image, prior_inference_steps, prior_cfg_scale, img1_strength, img2_strength) -> Processed:
         p.extra_image = extra_image
-        p.inference_steps = inference_steps
+        p.prior_inference_steps = prior_inference_steps
         p.prior_cfg_scale = prior_cfg_scale
         p.img1_strength = img1_strength
         p.img2_strength = img2_strength
         p.sampler_name = "DDIM"
         p.init_image = getattr(p, 'init_images', None)
-        p.extra_generation_params["Prior Inference Steps"] = inference_steps
+        p.extra_generation_params["Prior Inference Steps"] = prior_inference_steps
         p.extra_generation_params["Prior CFG Scale"] = prior_cfg_scale
         p.extra_generation_params["Script"] = self.title()

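Note (not part of the commit): the rename from inference_steps to prior_inference_steps lines up with the abstract_model.py change above; the value Script.run stores on the processing object is the one process_images later reads when building the prior settings. A trivial sketch of that flow, using a hypothetical stand-in class and illustrative values:

class P:                        # hypothetical stand-in for StableDiffusionProcessing
    pass

p = P()
p.prompt = "a fox in watercolor"
p.prior_cfg_scale = 4.0
p.prior_inference_steps = 128   # what Script.run now stores from the renamed slider

# Mirrors the updated lines in AbstractModel.process_images:
prior_settings_dict = {"prompt": p.prompt, "guidance_scale": p.prior_cfg_scale}
prior_settings_dict["num_inference_steps"] = p.prior_inference_steps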
