diff --git a/networks/resize_lora.py b/networks/resize_lora.py
index 5bf8b3c3a..d697baa4c 100644
--- a/networks/resize_lora.py
+++ b/networks/resize_lora.py
@@ -5,10 +5,12 @@
 import os
 import argparse
 import torch
-from safetensors.torch import load_file, save_file
+from safetensors.torch import load_file, save_file, safe_open
 from tqdm import tqdm
-from library import train_util
 import numpy as np
+
+from library import train_util
+from library import model_util
 from library.utils import setup_logging

 setup_logging()
@@ -36,16 +38,18 @@ def load_state_dict(file_name, dtype):
     return sd, metadata


-def save_to_file(file_name, model, state_dict, dtype, metadata):
+
+def save_to_file(file_name, state_dict, dtype, metadata):
     if dtype is not None:
         for key in list(state_dict.keys()):
             if type(state_dict[key]) == torch.Tensor:
                 state_dict[key] = state_dict[key].to(dtype)

     if model_util.is_safetensors(file_name):
-        save_file(model, file_name, metadata)
+        save_file(state_dict, file_name, metadata)
     else:
-        torch.save(model, file_name)
+        torch.save(state_dict, file_name)
+


 # Indexing functions
@@ -62,18 +66,18 @@ def index_sv_cumulative(S, target):
 def index_sv_fro(S, target):
     S_squared = S.pow(2)
     S_fro_sq = float(torch.sum(S_squared))
-    sum_S_squared = torch.cumsum(S_squared, dim=0)/S_fro_sq
+    sum_S_squared = torch.cumsum(S_squared, dim=0) / S_fro_sq
     index = int(torch.searchsorted(sum_S_squared, target**2)) + 1
-    index = max(1, min(index, len(S)-1))
+    index = max(1, min(index, len(S) - 1))

     return index


 def index_sv_ratio(S, target):
     max_sv = S[0]
-    min_sv = max_sv/target
+    min_sv = max_sv / target
     index = int(torch.sum(S > min_sv).item())
-    index = max(1, min(index, len(S)-1))
+    index = max(1, min(index, len(S) - 1))

     return index

@@ -169,10 +173,10 @@ def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):

     if S[0] <= MIN_SV:  # Zero matrix, set dim to 1
         new_rank = 1
-        new_alpha = float(scale*new_rank)
+        new_alpha = float(scale * new_rank)
     elif new_rank > rank:  # cap max rank at rank
         new_rank = rank
-        new_alpha = float(scale*new_rank)
+        new_alpha = float(scale * new_rank)

     # Calculate resize info
     s_sum = torch.sum(torch.abs(S))
@@ -200,19 +204,21 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna

     # Extract loaded lora dim and alpha
     for key, value in lora_sd.items():
-        if network_alpha is None and 'alpha' in key:
+        if network_alpha is None and "alpha" in key:
             network_alpha = value
-        if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
+        if network_dim is None and "lora_down" in key and len(value.size()) == 2:
             network_dim = value.size()[0]
         if network_alpha is not None and network_dim is not None:
             break
         if network_alpha is None:
             network_alpha = network_dim

-    scale = network_alpha/network_dim
+    scale = network_alpha / network_dim

     if dynamic_method:
-        logger.info(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}, max rank is {new_rank}")
+        logger.info(
+            f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}, max rank is {new_rank}"
+        )

     lora_down_weight = None
     lora_up_weight = None
@@ -224,8 +230,8 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
     with torch.no_grad():
         for key, value in tqdm(lora_sd.items()):
             weight_name = None
-            if 'lora_down' in key:
-                block_down_name = key.rsplit('.lora_down', 1)[0]
+            if "lora_down" in key:
+                block_down_name = key.rsplit(".lora_down", 1)[0]
                 weight_name = key.rsplit(".", 1)[-1]
                 lora_down_weight = value
             else:
@@ -233,18 +239,18 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna

             # find corresponding lora_up and alpha
             block_up_name = block_down_name
-            lora_up_weight = lora_sd.get(block_up_name + '.lora_up.' + weight_name, None)
-            lora_alpha = lora_sd.get(block_down_name + '.alpha', None)
+            lora_up_weight = lora_sd.get(block_up_name + ".lora_up." + weight_name, None)
+            lora_alpha = lora_sd.get(block_down_name + ".alpha", None)

-            weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)
+            weights_loaded = lora_down_weight is not None and lora_up_weight is not None

             if weights_loaded:

-                conv2d = (len(lora_down_weight.size()) == 4)
+                conv2d = len(lora_down_weight.size()) == 4
                 if lora_alpha is None:
                     scale = 1.0
                 else:
-                    scale = lora_alpha/lora_down_weight.size()[0]
+                    scale = lora_alpha / lora_down_weight.size()[0]

                 if conv2d:
                     full_weight_matrix = merge_conv(lora_down_weight, lora_up_weight, device)
@@ -254,24 +260,26 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
                     param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)

                 if verbose:
-                    max_ratio = param_dict['max_ratio']
-                    sum_retained = param_dict['sum_retained']
-                    fro_retained = param_dict['fro_retained']
+                    max_ratio = param_dict["max_ratio"]
+                    sum_retained = param_dict["sum_retained"]
+                    fro_retained = param_dict["fro_retained"]
                     if not np.isnan(fro_retained):
                         fro_list.append(float(fro_retained))

                     verbose_str += f"{block_down_name:75} | "
-                    verbose_str += f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"
+                    verbose_str += (
+                        f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"
+                    )
                 if verbose and dynamic_method:
                     verbose_str += f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n"
                 else:
                     verbose_str += "\n"

-                new_alpha = param_dict['new_alpha']
+                new_alpha = param_dict["new_alpha"]

                 o_lora_sd[block_down_name + "." + "lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous()
                 o_lora_sd[block_up_name + "." + "lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous()
-                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict['new_alpha']).to(save_dtype)
+                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict["new_alpha"]).to(save_dtype)

                 block_down_name = None
                 block_up_name = None
@@ -281,38 +289,36 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
                 del param_dict

     if verbose:
-        logger.info(verbose_str)
-
-        logger.info(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}")
+        print(verbose_str)
+        print(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}")
     logger.info("resizing complete")
     return o_lora_sd, network_dim, new_alpha


 def resize(args):
-    if (
-        args.save_to is None or
-        not (args.save_to.endswith('.ckpt') or
-             args.save_to.endswith('.pt') or
-             args.save_to.endswith('.pth') or
-             args.save_to.endswith('.safetensors'))
-    ):
+    if args.save_to is None or not (
+        args.save_to.endswith(".ckpt")
+        or args.save_to.endswith(".pt")
+        or args.save_to.endswith(".pth")
+        or args.save_to.endswith(".safetensors")
+    ):
         raise Exception("The --save_to argument must be specified and must be a .ckpt , .pt, .pth or .safetensors file.")

     args.new_conv_rank = args.new_conv_rank if args.new_conv_rank is not None else args.new_rank

     def str_to_dtype(p):
-        if p == 'float':
+        if p == "float":
             return torch.float
-        if p == 'fp16':
+        if p == "fp16":
             return torch.float16
-        if p == 'bf16':
+        if p == "bf16":
             return torch.bfloat16
         return None

     if args.dynamic_method and not args.dynamic_param:
         raise Exception("If using dynamic_method, then dynamic_param is required")

-    merge_dtype = str_to_dtype('float') # matmul method above only seems to work in float32
+    merge_dtype = str_to_dtype("float")  # matmul method above only seems to work in float32
     save_dtype = str_to_dtype(args.save_precision)
     if save_dtype is None:
         save_dtype = merge_dtype
@@ -321,7 +327,9 @@ def str_to_dtype(p):
     lora_sd, metadata = load_state_dict(args.model, merge_dtype)

     logger.info("Resizing Lora...")
-    state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, args.new_conv_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose)
+    state_dict, old_dim, new_alpha = resize_lora_model(
+        lora_sd, args.new_rank, args.new_conv_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose
+    )

     # update metadata
     if metadata is None:
@@ -330,47 +338,73 @@ def str_to_dtype(p):
     comment = metadata.get("ss_training_comment", "")
     if not args.dynamic_method:
-        metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}"
+        conv_desc = "" if args.new_rank == args.new_conv_rank else f" (conv: {args.new_conv_rank})"
+        metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}{conv_desc}; {comment}"
         metadata["ss_network_dim"] = str(args.new_rank)
         metadata["ss_network_alpha"] = str(new_alpha)
     else:
-        metadata["ss_training_comment"] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
-        metadata["ss_network_dim"] = 'Dynamic'
-        metadata["ss_network_alpha"] = 'Dynamic'
+        metadata["ss_training_comment"] = (
+            f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
+        )
+        metadata["ss_network_dim"] = "Dynamic"
+        metadata["ss_network_alpha"] = "Dynamic"

     model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
     metadata["sshs_model_hash"] = model_hash
     metadata["sshs_legacy_hash"] = legacy_hash

     logger.info(f"saving model to: {args.save_to}")
-    save_to_file(args.save_to, state_dict, state_dict, save_dtype, metadata)
+    save_to_file(args.save_to, state_dict, save_dtype, metadata)


 def setup_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser()

-    parser.add_argument("--save_precision", type=str, default=None,
-                        choices=[None, "float", "fp16", "bf16"], help="precision in saving, float if omitted / 保存時の精度、未指定時はfloat")
-    parser.add_argument("--new_rank", type=int, default=4,
-                        help="Specify rank of output LoRA / 出力するLoRAのrank (dim)")
-    parser.add_argument("--new_conv_rank", type=int, default=None,
-                        help="Specify rank of output LoRA for Conv2d 3x3, None for same as new_rank / 出力するConv2D 3x3 LoRAのrank (dim)、Noneでnew_rankと同じ")
-    parser.add_argument("--save_to", type=str, default=None,
-                        help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors")
-    parser.add_argument("--model", type=str, default=None,
-                        help="LoRA model to resize at to new rank: ckpt or safetensors file / 読み込むLoRAモデル、ckptまたはsafetensors")
-    parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う")
-    parser.add_argument("--verbose", action="store_true",
-                        help="Display verbose resizing information / rank変更時の詳細情報を出力する")
-    parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
-                        help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank")
-    parser.add_argument("--dynamic_param", type=float, default=None,
-                        help="Specify target for dynamic reduction")
+    parser.add_argument(
+        "--save_precision",
+        type=str,
+        default=None,
+        choices=[None, "float", "fp16", "bf16"],
+        help="precision in saving, float if omitted / 保存時の精度、未指定時はfloat",
+    )
+    parser.add_argument("--new_rank", type=int, default=4, help="Specify rank of output LoRA / 出力するLoRAのrank (dim)")
+    parser.add_argument(
+        "--new_conv_rank",
+        type=int,
+        default=None,
+        help="Specify rank of output LoRA for Conv2d 3x3, None for same as new_rank / 出力するConv2D 3x3 LoRAのrank (dim)、Noneでnew_rankと同じ",
+    )
+    parser.add_argument(
+        "--save_to",
+        type=str,
+        default=None,
+        help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors",
+    )
+    parser.add_argument(
+        "--model",
+        type=str,
+        default=None,
+        help="LoRA model to resize at to new rank: ckpt or safetensors file / 読み込むLoRAモデル、ckptまたはsafetensors",
+    )
+    parser.add_argument(
+        "--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う"
+    )
+    parser.add_argument(
+        "--verbose", action="store_true", help="Display verbose resizing information / rank変更時の詳細情報を出力する"
+    )
+    parser.add_argument(
+        "--dynamic_method",
+        type=str,
+        default=None,
+        choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
+        help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank",
+    )
+    parser.add_argument("--dynamic_param", type=float, default=None, help="Specify target for dynamic reduction")

     return parser

-
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     parser = setup_parser()

     args = parser.parse_args()