Lora support!
update readme to reflect some recent changes
AUTOMATIC1111 committed Jan 21, 2023
1 parent cbfb463 commit 855b9e3
Showing 9 changed files with 314 additions and 4 deletions.
14 changes: 12 additions & 2 deletions README.md
@@ -51,6 +51,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- Possible to change defaults/min/max/step values for UI elements via text config
- Tiling support, a checkbox to create images that can be tiled like textures
- Progress bar and live image generation preview
- Can use a separate neural network to produce previews with almost no VRAM or compute requirements
- Negative prompt, an extra text field that allows you to list what you don't want to see in the generated image
- Styles, a way to save part of a prompt and easily apply it via dropdown later
- Variations, a way to generate the same image but with tiny differences
@@ -75,13 +76,22 @@ A browser interface based on Gradio library for Stable Diffusion.
- hypernetworks and embeddings options
- Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
- Clip skip
- Use Hypernetworks
- Use VAEs
- Hypernetworks
- Loras (same as Hypernetworks but prettier; see the prompt example below this list)
- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt.
- Can select to load a different VAE from settings screen
- Estimated completion time in progress bar
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
- Now without any bad letters!
- Load checkpoints in safetensors format
- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64
- Now with a license!
- Reorder elements in the UI from settings screen
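For example, a Lora file placed in `models/Lora` can be applied from the prompt with the extra networks syntax; the name `my_lora` below is just a placeholder for the file's name, and the number after the second colon is an optional strength multiplier defaulting to 1.0:

```
a photo of a cat <lora:my_lora:0.8>
```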

## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
20 changes: 20 additions & 0 deletions extensions-builtin/Lora/extra_networks_lora.py
@@ -0,0 +1,20 @@
from modules import extra_networks
import lora

class ExtraNetworkLora(extra_networks.ExtraNetwork):
    def __init__(self):
        super().__init__('lora')

    def activate(self, p, params_list):
        names = []
        multipliers = []
        for params in params_list:
            assert len(params.items) > 0

            names.append(params.items[0])
            multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)

        lora.load_loras(names, multipliers)

    def deactivate(self, p):
        pass
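A standalone sketch (not part of the commit) of the name/multiplier extraction that `activate` performs: for a prompt containing `<lora:my_lora:0.7> <lora:other_lora>`, the extra-networks parser hands it one params object per tag, roughly as below. `Params` and the example names are illustrative stand-ins.

```python
# Illustrative only: mimics the params objects that the extra-networks
# parser would pass to ExtraNetworkLora.activate().
from dataclasses import dataclass
from typing import List


@dataclass
class Params:
    items: List[str]


def extract(params_list):
    # same logic as activate(): first item is the Lora name,
    # second (optional) item is the multiplier, defaulting to 1.0
    names, multipliers = [], []
    for params in params_list:
        assert len(params.items) > 0
        names.append(params.items[0])
        multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
    return names, multipliers


# "<lora:my_lora:0.7> <lora:other_lora>" would arrive roughly as:
print(extract([Params(['my_lora', '0.7']), Params(['other_lora'])]))
# -> (['my_lora', 'other_lora'], [0.7, 1.0])
```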
198 changes: 198 additions & 0 deletions extensions-builtin/Lora/lora.py
@@ -0,0 +1,198 @@
import glob
import os
import re
import torch

from modules import shared, devices, sd_models

re_digits = re.compile(r"\d+")
re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)")
re_unet_mid_blocks = re.compile(r"lora_unet_mid_block_attentions_(\d+)_(.+)")
re_unet_up_blocks = re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)")
re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)")


def convert_diffusers_name_to_compvis(key):
    def match(match_list, regex):
        r = re.match(regex, key)
        if not r:
            return False

        match_list.clear()
        match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
        return True

    m = []

    if match(m, re_unet_down_blocks):
        return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_1_{m[2]}"

    if match(m, re_unet_mid_blocks):
        return f"diffusion_model_middle_block_1_{m[1]}"

    if match(m, re_unet_up_blocks):
        return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}"

    if match(m, re_text_block):
        return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"

    return key


class LoraOnDisk:
    def __init__(self, name, filename):
        self.name = name
        self.filename = filename


class LoraModule:
    def __init__(self, name):
        self.name = name
        self.multiplier = 1.0
        self.modules = {}
        self.mtime = None


class LoraUpDownModule:
    def __init__(self):
        self.up = None
        self.down = None


def assign_lora_names_to_compvis_modules(sd_model):
    lora_layer_mapping = {}

    for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
        lora_name = name.replace(".", "_")
        lora_layer_mapping[lora_name] = module
        module.lora_layer_name = lora_name

    for name, module in shared.sd_model.model.named_modules():
        lora_name = name.replace(".", "_")
        lora_layer_mapping[lora_name] = module
        module.lora_layer_name = lora_name

    sd_model.lora_layer_mapping = lora_layer_mapping


def load_lora(name, filename):
    lora = LoraModule(name)
    lora.mtime = os.path.getmtime(filename)

    sd = sd_models.read_state_dict(filename)

    keys_failed_to_match = []

    for key_diffusers, weight in sd.items():
        fullkey = convert_diffusers_name_to_compvis(key_diffusers)
        key, lora_key = fullkey.split(".", 1)

        sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
        if sd_module is None:
            keys_failed_to_match.append(key_diffusers)
            continue

        if type(sd_module) == torch.nn.Linear:
            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
        elif type(sd_module) == torch.nn.Conv2d:
            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
        else:
            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'

        with torch.no_grad():
            module.weight.copy_(weight)

        module.to(device=devices.device, dtype=devices.dtype)

        lora_module = lora.modules.get(key, None)
        if lora_module is None:
            lora_module = LoraUpDownModule()
            lora.modules[key] = lora_module

        if lora_key == "lora_up.weight":
            lora_module.up = module
        elif lora_key == "lora_down.weight":
            lora_module.down = module
        else:
            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight or lora_down.weight'

    if len(keys_failed_to_match) > 0:
        print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")

    return lora


def load_loras(names, multipliers=None):
    already_loaded = {}

    for lora in loaded_loras:
        if lora.name in names:
            already_loaded[lora.name] = lora

    loaded_loras.clear()

    loras_on_disk = [available_loras.get(name, None) for name in names]
    if any([x is None for x in loras_on_disk]):
        list_available_loras()

        loras_on_disk = [available_loras.get(name, None) for name in names]

    for i, name in enumerate(names):
        lora = already_loaded.get(name, None)

        lora_on_disk = loras_on_disk[i]
        if lora_on_disk is not None:
            if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
                lora = load_lora(name, lora_on_disk.filename)

        if lora is None:
            print(f"Couldn't find Lora with name {name}")
            continue

        lora.multiplier = multipliers[i] if multipliers else 1.0
        loaded_loras.append(lora)


def lora_forward(module, input, res):
    if len(loaded_loras) == 0:
        return res

    lora_layer_name = getattr(module, 'lora_layer_name', None)
    for lora in loaded_loras:
        module = lora.modules.get(lora_layer_name, None)
        if module is not None:
            res = res + module.up(module.down(input)) * lora.multiplier

    return res


def lora_Linear_forward(self, input):
    return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input))


def lora_Conv2d_forward(self, input):
    return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora(self, input))


def list_available_loras():
    available_loras.clear()

    os.makedirs(lora_dir, exist_ok=True)

    candidates = glob.glob(os.path.join(lora_dir, '**/*.pt'), recursive=True) + glob.glob(os.path.join(lora_dir, '**/*.safetensors'), recursive=True)

    for filename in sorted(candidates):
        if os.path.isdir(filename):
            continue

        name = os.path.splitext(os.path.basename(filename))[0]

        available_loras[name] = LoraOnDisk(name, filename)


lora_dir = os.path.join(shared.models_path, "Lora")
available_loras = {}
loaded_loras = []

list_available_loras()
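The heart of the file is `lora_forward`: each patched `Linear`/`Conv2d` keeps its original output, and every loaded Lora adds `up(down(input)) * multiplier` on top. A self-contained PyTorch sketch (not webui code; dimensions and names are made up) of why this is equivalent to adding a low-rank update `up.weight @ down.weight` to the base weight:

```python
import torch

torch.manual_seed(0)
d_in, d_out, rank = 8, 6, 2

base = torch.nn.Linear(d_in, d_out, bias=False)
down = torch.nn.Linear(d_in, rank, bias=False)   # "lora_down": d_in -> rank
up = torch.nn.Linear(rank, d_out, bias=False)    # "lora_up":   rank -> d_out
multiplier = 0.8

x = torch.randn(3, d_in)

# What lora_forward computes: the original output plus the low-rank branch.
patched = base(x) + up(down(x)) * multiplier

# Equivalent view: a rank-`rank` delta merged into the base weight matrix.
merged_weight = base.weight + multiplier * (up.weight @ down.weight)
merged = torch.nn.functional.linear(x, merged_weight)

print(torch.allclose(patched, merged, atol=1e-6))  # True
```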

30 changes: 30 additions & 0 deletions extensions-builtin/Lora/scripts/lora_script.py
@@ -0,0 +1,30 @@
import torch

import lora
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks


def unload():
    torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora


def before_ui():
    ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
    extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())


if not hasattr(torch.nn, 'Linear_forward_before_lora'):
    torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
    torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward

torch.nn.Linear.forward = lora.lora_Linear_forward
torch.nn.Conv2d.forward = lora.lora_Conv2d_forward

script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
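The script relies on a stash-once / restore-on-unload idiom: the original `forward` methods are saved on `torch.nn` only if they have not been saved already (so reloading the script does not wrap the wrapper), and `unload()` puts them back. A minimal self-contained sketch of the same idiom outside the webui; `patched_forward` and the `Linear_forward_before_patch` attribute name are illustrative, not webui code:

```python
import torch


def patched_forward(self, input):
    # a real patch would add behaviour here before delegating to the original
    return torch.nn.Linear_forward_before_patch(self, input)


# stash the original exactly once, even if this file is executed again
if not hasattr(torch.nn, 'Linear_forward_before_patch'):
    torch.nn.Linear_forward_before_patch = torch.nn.Linear.forward

torch.nn.Linear.forward = patched_forward

layer = torch.nn.Linear(4, 2)
print(layer(torch.randn(1, 4)).shape)  # torch.Size([1, 2]) -- patched path still works

# mirroring unload(): restore the original forward
torch.nn.Linear.forward = torch.nn.Linear_forward_before_patch
```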
35 changes: 35 additions & 0 deletions extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -0,0 +1,35 @@
import os
import lora

from modules import shared, ui_extra_networks


class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
    def __init__(self):
        super().__init__('Lora')

    def refresh(self):
        lora.list_available_loras()

    def list_items(self):
        for name, lora_on_disk in lora.available_loras.items():
            path, ext = os.path.splitext(lora_on_disk.filename)
            previews = [path + ".png", path + ".preview.png"]

            preview = None
            for file in previews:
                if os.path.isfile(file):
                    preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
                    break

            yield {
                "name": name,
                "filename": path,
                "preview": preview,
                "prompt": f"<lora:{name}:1.0>",
                "local_preview": path + ".png",
            }

    def allowed_directories_for_previews(self):
        return [lora.lora_dir]
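For a hypothetical file `models/Lora/my_lora.safetensors` with only a `my_lora.preview.png` thumbnail next to it, `list_items` would yield roughly the following item (paths and mtime are illustrative):

```python
{
    "name": "my_lora",
    "filename": "models/Lora/my_lora",
    "preview": "./file=models/Lora/my_lora.preview.png?mtime=1674300000.0",
    "prompt": "<lora:my_lora:1.0>",
    "local_preview": "models/Lora/my_lora.png",
}
```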

2 changes: 1 addition & 1 deletion modules/extra_networks_hypernet.py
@@ -17,5 +17,5 @@ def activate(self, p, params_list):

        hypernetwork.load_hypernetworks(names, multipliers)

    def deactivate(p, self):
    def deactivate(self, p):
        pass
15 changes: 15 additions & 0 deletions modules/script_callbacks.py
@@ -73,6 +73,7 @@ def __init__(self, imgs, cols, rows):
    callbacks_image_grid=[],
    callbacks_infotext_pasted=[],
    callbacks_script_unloaded=[],
    callbacks_before_ui=[],
)


@@ -189,6 +190,14 @@ def script_unloaded_callback():
            report_exception(c, 'script_unloaded')


def before_ui_callback():
    for c in reversed(callback_map['callbacks_before_ui']):
        try:
            c.callback()
        except Exception:
            report_exception(c, 'before_ui')


def add_callback(callbacks, fun):
    stack = [x for x in inspect.stack() if x.filename != __file__]
    filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -313,3 +322,9 @@ def on_script_unloaded(callback):
    the script did should be reverted here"""

    add_callback(callback_map['callbacks_script_unloaded'], callback)


def on_before_ui(callback):
    """register a function to be called before the UI is created."""

    add_callback(callback_map['callbacks_before_ui'], callback)
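A minimal sketch of how an extension script could use the new hook, mirroring what `lora_script.py` above does; `MyPage` and its title are hypothetical examples, not part of the commit:

```python
from modules import script_callbacks, ui_extra_networks


class MyPage(ui_extra_networks.ExtraNetworksPage):  # hypothetical example page
    def __init__(self):
        super().__init__('My stuff')

    def refresh(self):
        pass

    def list_items(self):
        return []

    def allowed_directories_for_previews(self):
        return []


def before_ui():
    # called right before modules.ui.create_ui(), per the webui.py change below
    ui_extra_networks.register_page(MyPage())


script_callbacks.on_before_ui(before_ui)
```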
2 changes: 1 addition & 1 deletion modules/ui_extra_networks.py
@@ -10,7 +10,7 @@


def register_page(page):
"""registers extra networks page for the UI; recommend doing it in on_app_started() callback for extensions"""
"""registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions"""

extra_pages.append(page)

2 changes: 2 additions & 0 deletions webui.py
@@ -165,6 +165,8 @@ def webui():
    if shared.opts.clean_temp_dir_at_start:
        ui_tempdir.cleanup_tmpdr()

    modules.script_callbacks.before_ui_callback()

    shared.demo = modules.ui.create_ui()

    app, local_url, share_url = shared.demo.launch(
