Updates
bmaltais committed Jul 22, 2023
1 parent b39fd33 commit 278b693
Showing 5 changed files with 149 additions and 9 deletions.
4 changes: 3 additions & 1 deletion README.md
@@ -479,4 +479,6 @@ If you come across a `FileNotFoundError`, it is likely due to an installation is
 * 2023/07/18 (v21.8.4)
   - Relocate LR number of cycles and LR power options
   - Add missing LR number of cycles and LR power to Dreambooth and TI scripts
-  - Fix issue with conv_block_dims and conv_block_alphas
+  - Fix issue with conv_block_dims and conv_block_alphas
+  - Fix 0 noise offset issue
+  - Implement Stop training button on LoRA
31 changes: 31 additions & 0 deletions library/class_command_executor.py
@@ -0,0 +1,31 @@
import subprocess
import psutil
from library.custom_logging import setup_logging

# Set up logging
log = setup_logging()

class CommandExecutor:
    def __init__(self):
        self.process = None

    def execute_command(self, run_cmd):
        if self.process and self.process.poll() is None:
            log.info("The command is already running. Please wait for it to finish.")
        else:
            self.process = subprocess.Popen(run_cmd, shell=True)

    def kill_command(self):
        if self.process and self.process.poll() is None:
            try:
                parent = psutil.Process(self.process.pid)
                for child in parent.children(recursive=True):
                    child.kill()
                parent.kill()
                log.info("The running process has been terminated.")
            except psutil.NoSuchProcess:
                log.info("The process does not exist.")
            except Exception as e:
                log.info(f"Error when terminating process: {e}")
        else:
            log.info("There is no running process to kill.")
3 changes: 1 addition & 2 deletions library/common_gui.py
@@ -755,8 +755,7 @@ def run_cmd_advanced_training(**kwargs):
     noise_offset_type = kwargs.get('noise_offset_type', 'Original')
     if noise_offset_type == 'Original':
         noise_offset = float(kwargs.get("noise_offset", 0))
-        if noise_offset > 0:
-            run_cmd += f' --noise_offset={noise_offset}'
+        run_cmd += f' --noise_offset={noise_offset}'
 
         adaptive_noise_scale = float(kwargs.get("adaptive_noise_scale", 0))
         if adaptive_noise_scale != 0 and noise_offset > 0:
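For context, the removed guard meant that an explicit noise offset of 0 was silently dropped from the generated command, which is the "0 noise offset issue" in the changelog. A small stand-in sketch of the new behaviour; build_noise_offset_args is a hypothetical helper that only mirrors the fragment shown above:

```python
def build_noise_offset_args(**kwargs):
    """Simplified stand-in for the noise-offset fragment of run_cmd_advanced_training."""
    run_cmd = ''
    if kwargs.get('noise_offset_type', 'Original') == 'Original':
        noise_offset = float(kwargs.get('noise_offset', 0))
        # After this commit the flag is always emitted for the Original type,
        # even when the value is 0, instead of being skipped.
        run_cmd += f' --noise_offset={noise_offset}'
    return run_cmd


print(build_noise_offset_args(noise_offset_type='Original', noise_offset=0))
# prints: ' --noise_offset=0.0'
```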
25 changes: 19 additions & 6 deletions lora_gui.py
@@ -8,6 +8,7 @@
 import math
 import os
 import subprocess
+import psutil
 import pathlib
 import argparse
 from datetime import datetime
@@ -33,6 +34,7 @@
 from library.class_advanced_training import AdvancedTraining
 from library.class_sdxl_parameters import SDXLParameters
 from library.class_folders import Folders
+from library.class_command_executor import CommandExecutor
 from library.tensorboard_gui import (
     gradio_tensorboard,
     start_tensorboard,
@@ -47,8 +49,14 @@
 # Set up logging
 log = setup_logging()
 
-document_symbol = '\U0001F4C4' # 📄
+# Setup command executor
+executor = CommandExecutor()
+
+button_run = gr.Button('Start training', variant='primary')
+
+button_stop_training = gr.Button('Stop training')
+
+document_symbol = '\U0001F4C4' # 📄
 
 def save_configuration(
     save_as,
@@ -480,6 +488,7 @@ def train_model(
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
+    global command_running
 
     print_only_bool = True if print_only.get('label') == 'True' else False
     log.info(f'Start training LoRA {LoRA_type} ...')
@@ -973,10 +982,7 @@ def train_model(
 
     log.info(run_cmd)
     # Run the command
-    if os.name == 'posix':
-        os.system(run_cmd)
-    else:
-        subprocess.run(run_cmd)
+    executor.execute_command(run_cmd=run_cmd)
 
     # check if output_dir/last is a folder... therefore it is a diffuser model
     last_dir = pathlib.Path(f'{output_dir}/{output_name}')
@@ -1402,7 +1408,10 @@ def update_LoRA_settings(LoRA_type):
         ],
     )
 
-    button_run = gr.Button('Train model', variant='primary')
+    with gr.Row():
+        button_run = gr.Button('Start training', variant='primary')
+
+        button_stop_training = gr.Button('Stop training')
 
     button_print = gr.Button('Print training command')
 
@@ -1580,6 +1589,10 @@ def update_LoRA_settings(LoRA_type):
         inputs=[dummy_headless] + [dummy_db_false] + settings_list,
         show_progress=False,
     )
+
+    button_stop_training.click(
+        executor.kill_command
+    )
 
     button_print.click(
         train_model,
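Taken together, the lora_gui.py changes replace the blocking os.system/subprocess.run call with the executor and bind the new Stop button to executor.kill_command. A self-contained sketch of the same wiring pattern, assuming Gradio is installed; the dummy command and the minimal layout are illustrative, not a copy of the real training tab:

```python
import gradio as gr

from library.class_command_executor import CommandExecutor

executor = CommandExecutor()


def start_training():
    # Dummy long-running command standing in for the real training invocation.
    executor.execute_command(run_cmd='python -c "import time; time.sleep(60)"')


with gr.Blocks() as demo:
    with gr.Row():
        button_run = gr.Button('Start training', variant='primary')
        button_stop_training = gr.Button('Stop training')

    # Start launches the subprocess; Stop kills it and its child processes.
    button_run.click(start_training)
    button_stop_training.click(executor.kill_command)

demo.launch()
```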
95 changes: 95 additions & 0 deletions presets/lora/SDXL - LoRA adafactor v1.0.json
@@ -0,0 +1,95 @@
{
"LoRA_type": "Standard",
"adaptive_noise_scale": 0.00357,
"additional_parameters": "--log_prefix=xl-loha",
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": false,
"bucket_reso_steps": 32,
"cache_latents": true,
"cache_latents_to_disk": true,
"caption_dropout_every_n_epochs": 0.0,
"caption_dropout_rate": 0,
"caption_extension": ".txt2",
"clip_skip": "1",
"color_aug": false,
"conv_alpha": 4,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 4,
"decompose_both": false,
"dim_from_weights": false,
"down_lr_weight": "",
"enable_bucket": true,
"epoch": 30,
"factor": -1,
"flip_aug": false,
"full_bf16": false,
"full_fp16": false,
"gradient_accumulation_steps": 1.0,
"gradient_checkpointing": true,
"keep_tokens": 1,
"learning_rate": 1.0,
"lora_network_weights": "",
"lr_scheduler": "adafactor",
"lr_scheduler_num_cycles": "1",
"lr_scheduler_power": "",
"lr_warmup": 0,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": "0",
"max_resolution": "1024,1024",
"max_timestep": 1000,
"max_token_length": "75",
"max_train_epochs": "30",
"mem_eff_attn": false,
"mid_lr_weight": "",
"min_bucket_reso": 64,
"min_snr_gamma": 0,
"min_timestep": 0,
"mixed_precision": "bf16",
"module_dropout": 0,
"multires_noise_discount": 0,
"multires_noise_iterations": 0,
"network_alpha": 128,
"network_dim": 128,
"network_dropout": 0,
"no_token_padding": false,
"noise_offset": 0.0357,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"optimizer": "Adafactor",
"optimizer_args": "",
"persistent_data_loader_workers": false,
"prior_loss_weight": 1.0,
"random_crop": false,
"rank_dropout": 0,
"save_every_n_epochs": 5,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_precision": "bf16",
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 1,
"sdxl": true,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_no_half_vae": true,
"seed": "",
"shuffle_caption": false,
"stop_text_encoder_training_pct": 0,
"text_encoder_lr": 1.0,
"train_batch_size": 5,
"train_on_input": false,
"training_comment": "trigger: the white queen",
"unet_lr": 1.0,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_wandb": false,
"v2": false,
"v_parameterization": false,
"vae_batch_size": 0,
"wandb_api_key": "",
"weighted_captions": false,
"xformers": true
}
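The preset is plain JSON holding the GUI field values. A quick sketch, assuming it is read from the repository root, of loading the file and echoing a few values, including the noise offset flag that the common_gui.py change above now always emits:

```python
import json

# Path of the preset file added in this commit.
preset_path = 'presets/lora/SDXL - LoRA adafactor v1.0.json'

with open(preset_path, encoding='utf-8') as f:
    preset = json.load(f)

# A couple of the values the GUI would populate from this preset.
print(preset['optimizer'], preset['learning_rate'])   # Adafactor 1.0

# With the Original noise offset type, the flag is now always generated,
# here ' --noise_offset=0.0357'.
if preset['noise_offset_type'] == 'Original':
    print(f" --noise_offset={float(preset['noise_offset'])}")
```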
