"""DeepFloyd IF support for AUTOMATIC1111 (kandinsky-for-automatic1111).

Reconstructed from a deletion diff that collapsed two files
(scripts/deepfloydif.py and scripts/deepfloydif_script.py) into one
mangled patch. This module merges both: the IFModel pipeline wrapper
and the A1111 Script glue (UI + model load/unload helpers).
"""
from modules import errors

try:
    from diffusers import (
        IFPipeline,
        IFSuperResolutionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
    )
except ImportError:
    # diffusers is installed on first launch; a full restart is required
    # before the import succeeds.
    errors.print_error_explanation('RESTART AUTOMATIC1111 COMPLETELY TO FINISH INSTALLING PACKAGES FOR kandinsky-for-automatic1111')

import gc
import os
import sys

import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageOps, ImageFilter
from packaging import version
from huggingface_hub import login

from modules import (
    processing, shared, script_callbacks, images, devices, scripts,
    masking, sd_models, generation_parameters_copypaste, sd_vae,
)
from modules.processing import Processed, StableDiffusionProcessing
from modules.shared import opts, state
from modules.sd_models import CheckpointInfo
from modules.paths_internal import script_path

# abstract_model lives in this extension's scripts dir, which is not on
# sys.path by default.
sys.path.append('extensions/kandinsky-for-automatic1111/scripts')
from abstract_model import AbstractModel


class IFModel(AbstractModel):
    """Wrapper around the DeepFloyd IF diffusers pipelines.

    Stage I generates a low-resolution image from text; stage II is a
    super-resolution pass. Each stage loads its own pipeline via
    AbstractModel.load_pipeline and frees it before the next stage.
    """

    # Currently loaded diffusers pipeline (stage I or stage II), or None.
    pipe = None

    def __init__(self):
        AbstractModel.__init__(self, "IF")
        self.stageI_model = "XL"   # stage I checkpoint size: "M" | "L" | "XL"
        self.stageII_model = "L"   # stage II checkpoint size: "None" | "M" | "L"
        self.stages = []           # stage numbers to run, e.g. [1] or [1, 2]

    def load_encoder(self):
        """Load the stage I pipeline, which doubles as the text encoder.

        DeepFloyd checkpoints are gated on the Hugging Face Hub, so a
        missing local file usually means the user never logged in.
        """
        try:
            self.pipe = self.load_pipeline(
                "pipe", IFPipeline, f"DeepFloyd/IF-I-{self.stageI_model}-v1.0",
                {"safety_checker": None, "watermarker": None})
        except FileNotFoundError as fe:
            errors.print_error_explanation(
                f'File {fe.filename} not found. Did you forget the Hugging Face token?')

    def run_encoder(self, prior_settings_dict):
        """Encode the prompt (and optional negative prompt).

        Returns the tuple produced by ``pipe.encode_prompt`` or None when
        the stage I pipeline failed to load.
        """
        # NOTE: in the deleted original this message was broken across a
        # line inside the f-string literal; rejoined here.
        embeds = None
        if self.pipe is None:
            errors.print_error_explanation(
                f'Stage I {self.stageI_model} not loaded. Did you forget the Hugging Face token?')
        elif prior_settings_dict.get("negative_prompt") is None:
            embeds = self.pipe.encode_prompt(prompt=prior_settings_dict["prompt"])
        else:
            embeds = self.pipe.encode_prompt(
                prompt=prior_settings_dict["prompt"],
                negative_prompt=prior_settings_dict["negative_prompt"])
        return embeds

    def encoder_to_cpu(self):
        # Pipelines are reloaded per stage; nothing to offload here.
        pass

    def main_model_to_cpu(self):
        # See encoder_to_cpu.
        pass

    def cleanup_on_error(self):
        """Drop the current pipeline and reclaim GPU/CPU memory."""
        del self.pipe
        gc.collect()
        devices.torch_gc()

    def next_stage(self):
        # Each stage loads its own pipeline, so free the previous one first.
        self.cleanup_on_error()

    def sd_processing_to_dict_encoder(self, p: StableDiffusionProcessing):
        """Build the kwargs for prompt encoding from the processing object."""
        torch.manual_seed(p.seed)
        # NOTE(review): p.generators is presumably attached by
        # AbstractModel.process_images — confirm against abstract_model.py.
        parameters_dict = {"generator": p.generators, "prompt": p.prompt}
        if p.negative_prompt != "":
            parameters_dict["negative_prompt"] = p.negative_prompt
        return parameters_dict

    def sd_processing_to_dict_generator(self, p: StableDiffusionProcessing):
        """Build the kwargs for image generation from the processing object."""
        return {
            "prompt_embeds": p.image_embeds,
            "negative_prompt_embeds": p.negative_image_embeds,
            "height": p.height,
            "width": p.width,
            "guidance_scale": p.cfg_scale,
            "num_inference_steps": p.steps,
        }

    def txt2img(self, p, generation_parameters, b):
        """Run the current stage in text-to-image mode.

        Returns a list of PIL images. ``b`` is passed through by the
        AbstractModel driver (presumably the batch index; unused here).
        """
        # FIX: initialize so an unexpected current_stage returns [] instead
        # of raising NameError on the final return.
        result_images = []
        if self.current_stage == 1:
            if p.disable_stage_I:
                # Skip stage I: feed the provided init image straight to stage II.
                result_images = [p.init_image for _ in range(p.batch_size)]
            else:
                self.pipe = self.load_pipeline(
                    "pipe", IFPipeline, f"DeepFloyd/IF-I-{self.stageI_model}-v1.0",
                    {"safety_checker": None, "watermarker": None})
                result_images = self.pipe(
                    **generation_parameters, num_images_per_prompt=p.batch_size).images
        elif self.current_stage == 2:
            # Stage II upscales to the 4x target resolution set in Script.run.
            generation_parameters["width"] = p.width2
            generation_parameters["height"] = p.height2
            self.pipe = self.load_pipeline(
                "pipe", IFSuperResolutionPipeline,
                f"DeepFloyd/IF-II-{self.stageII_model}-v1.0",
                {"image": p.init_image, "safety_checker": None, "watermarker": None})
            result_images = self.pipe(
                **generation_parameters, num_images_per_prompt=p.batch_size).images
        return result_images

    def img2img(self, p, generation_parameters, b):
        """Run the current stage in image-to-image mode.

        Returns a list of PIL images; see txt2img for the stage layout.
        """
        generation_parameters["strength"] = p.denoising_strength
        generation_parameters["image"] = p.init_image
        # FIX: initialize so an unexpected current_stage returns [] instead
        # of raising NameError on the final return.
        result_images = []
        if self.current_stage == 1:
            if p.disable_stage_I:
                result_images = [p.init_image for _ in range(p.batch_size)]
            else:
                self.pipe = self.load_pipeline(
                    "pipe", IFImg2ImgPipeline, f"DeepFloyd/IF-I-{self.stageI_model}-v1.0",
                    {"safety_checker": None, "watermarker": None})
                result_images = self.pipe(
                    **generation_parameters, num_images_per_prompt=p.batch_size).images
        elif self.current_stage == 2:
            generation_parameters["width"] = p.width2
            generation_parameters["height"] = p.height2
            self.pipe = self.load_pipeline(
                "pipe", IFImg2ImgSuperResolutionPipeline,
                f"DeepFloyd/IF-II-{self.stageII_model}-v1.0",
                {"safety_checker": None, "watermarker": None})
            result_images = self.pipe(
                **generation_parameters, num_images_per_prompt=p.batch_size).images
        return result_images

    def inpaint(self, p, generation_parameters, b):
        # Inpainting is not supported for DeepFloyd IF.
        pass


def unload_model():
    """Unload the Stable Diffusion checkpoint to free VRAM for IF.

    Replaces shared.sd_model with an IFModel stub so the webui does not
    try to reload SD weights behind our back.
    """
    if shared.sd_model is None:
        shared.sd_model = IFModel()
        print("Unloaded Stable Diffusion model")
        return

    if not isinstance(shared.sd_model, IFModel):
        sd_models.unload_model_weights()
        sd_vae.clear_loaded_vae()
        devices.torch_gc()
        gc.collect()
        torch.cuda.empty_cache()
        shared.sd_model = IFModel()


def reload_model():
    """Restore the Stable Diffusion checkpoint after IF use."""
    # NOTE(review): indentation was lost in the source diff; the cleanup
    # calls are assumed to belong inside the branch — confirm upstream.
    if shared.sd_model is None or isinstance(shared.sd_model, IFModel):
        shared.sd_model = None
        sd_models.reload_model_weights()
        devices.torch_gc()
        gc.collect()
        torch.cuda.empty_cache()


def unload_if_model():
    """Free any loaded IF pipelines and drop shared.if_model."""
    if getattr(shared, "if_model", None) is not None:
        # NOTE(review): pipe_prior is never set by IFModel; kept (via
        # getattr, so it is harmless) for parity with the kandinsky script.
        if getattr(shared.if_model, "pipe_prior", None) is not None:
            del shared.if_model.pipe_prior
            devices.torch_gc()
            gc.collect()
            torch.cuda.empty_cache()

        if getattr(shared.if_model, "pipe", None) is not None:
            del shared.if_model.pipe
            devices.torch_gc()
            gc.collect()
            torch.cuda.empty_cache()

        del shared.if_model
        print("Unloaded IF model")
    else:
        print("No IF model to unload")


class Script(scripts.Script):
    """A1111 custom script entry: DeepFloyd IF txt2img / img2img."""

    def title(self):
        return "IF"

    def ui(self, is_img2img):
        """Build the script UI; returns the inputs passed to run()."""
        gr.Markdown("To save VRAM unload the Stable Diffusion Model")

        with gr.Row():
            unload_sd_model = gr.Button("Unload Stable Diffusion Model")
            unload_sd_model.click(unload_model)
            reload_sd_model = gr.Button("Reload Stable Diffusion Model")
            reload_sd_model.click(reload_model)
            unload_k_model = gr.Button("Unload IF Model")
            unload_k_model.click(unload_if_model)

        # NOTE(review): component nesting was lost in the source diff;
        # layout reconstructed — confirm against the extension upstream.
        with gr.Row():
            with gr.Column():
                stageI_model = gr.inputs.Dropdown(
                    label="Stage I Model Type", choices=["M", "L", "XL"], default="XL")
                checkbox = gr.inputs.Checkbox(label="Disable Stage I", default=False)
            stageII_model = gr.inputs.Dropdown(
                label="Stage II Model Type", choices=["None", "M", "L"], default="L")

        token_textbox = gr.inputs.Textbox(label="Hugging Face Token", type="password")

        return [token_textbox, stageI_model, stageII_model, checkbox]

    def run(self, p, token, stageI_model, stageII_model, disable_stage_I) -> Processed:
        """Configure the shared IFModel and delegate image generation to it."""
        p.sampler_name = "DDPM"
        # init_images is only set for img2img; None signals txt2img.
        p.init_image = getattr(p, 'init_images', None)
        p.extra_generation_params["Script"] = "if"
        # Stage II target resolution: 4x the stage I output.
        p.width2 = p.width * 4
        p.height2 = p.height * 4
        p.disable_stage_I = disable_stage_I

        shared.if_model = getattr(shared, 'if_model', None)
        if shared.if_model is None:
            shared.if_model = IFModel()
        shared.if_model.stageI_model = stageI_model
        shared.if_model.stageII_model = stageII_model

        # DeepFloyd checkpoints are gated; authenticate when a token is given.
        if token != "":
            login(token=token)

        shared.if_model.stages = [1]
        if stageII_model != "None":
            shared.if_model.stages.append(2)

        return shared.if_model.process_images(p)