
Commit

Merge pull request kohya-ss#308 from bmaltais/dev
Remove legacy 8bit adam checkbox
bmaltais authored Mar 5, 2023
2 parents d1b8af9 + 09939ff commit 7882b5d
Showing 6 changed files with 71 additions and 122 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -176,6 +176,8 @@ This will store your a backup file with your current locally installed pip packa

## Change History

* 2023/03/05 (v21.1.4):
  - Remove the legacy and confusing "Use 8bit adam" checkbox. The optimizer is now configured via the Optimizer dropdown list and will be set correctly when loading legacy config files.
* 2023/03/04 (v21.1.3):
- Fix progress bar being displayed when not required.
- Add support for linux, thank you @devNegative-asm
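The v21.1.4 entry above says the old checkbox value is folded into the Optimizer setting when a legacy config is loaded. A minimal sketch of that migration, mirroring the update_my_data change in library/common_gui.py further down in this diff (the helper name and sample configs here are illustrative, not part of the commit):

    def migrate_legacy_config(my_data: dict) -> dict:
        # A legacy config that had the old checkbox enabled maps to the AdamW8bit optimizer.
        if my_data.get('use_8bit_adam', False):
            my_data['optimizer'] = 'AdamW8bit'
        # A legacy config with neither key set falls back to plain AdamW.
        if my_data.get('optimizer', 'missing') == 'missing' and not my_data.get('use_8bit_adam', False):
            my_data['optimizer'] = 'AdamW'
        return my_data

    # Example: an old config that only carried the checkbox now selects AdamW8bit.
    print(migrate_legacy_config({'use_8bit_adam': True}))  # {'use_8bit_adam': True, 'optimizer': 'AdamW8bit'}
    print(migrate_legacy_config({}))                       # {'optimizer': 'AdamW'}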
24 changes: 12 additions & 12 deletions dreambooth_gui.py
@@ -24,7 +24,7 @@
gradio_training,
gradio_config,
gradio_source_model,
set_legacy_8bitadam,
# set_legacy_8bitadam,
update_my_data,
)
from library.tensorboard_gui import (
@@ -72,7 +72,7 @@ def save_configuration(
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -173,7 +173,7 @@ def open_configuration(
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -253,7 +253,7 @@ def train_model(
full_fp16,
no_token_padding,
stop_text_encoder_training_pct,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -443,7 +443,7 @@ def train_model(
gradient_checkpointing=gradient_checkpointing,
full_fp16=full_fp16,
xformers=xformers,
use_8bit_adam=use_8bit_adam,
# use_8bit_adam=use_8bit_adam,
keep_tokens=keep_tokens,
persistent_data_loader_workers=persistent_data_loader_workers,
bucket_no_upscale=bucket_no_upscale,
@@ -622,7 +622,7 @@ def dreambooth_tab(
show_progress=False,
)
(
use_8bit_adam,
# use_8bit_adam,
xformers,
full_fp16,
gradient_checkpointing,
@@ -650,11 +650,11 @@ def dreambooth_tab(
inputs=[color_aug],
outputs=[cache_latents],
)
optimizer.change(
set_legacy_8bitadam,
inputs=[optimizer, use_8bit_adam],
outputs=[optimizer, use_8bit_adam],
)
# optimizer.change(
# set_legacy_8bitadam,
# inputs=[optimizer, use_8bit_adam],
# outputs=[optimizer, use_8bit_adam],
# )
with gr.Tab('Tools'):
gr.Markdown(
'This section provide Dreambooth tools to help setup your dataset...'
@@ -708,7 +708,7 @@ def dreambooth_tab(
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
24 changes: 12 additions & 12 deletions finetune_gui.py
@@ -18,7 +18,7 @@
gradio_source_model,
color_aug_changed,
run_cmd_training,
set_legacy_8bitadam,
# set_legacy_8bitadam,
update_my_data,
)
from library.tensorboard_gui import (
@@ -70,7 +70,7 @@ def save_configuration(
create_buckets,
save_model_as,
caption_extension,
use_8bit_adam,
# use_8bit_adam,
xformers,
clip_skip,
save_state,
@@ -177,7 +177,7 @@ def open_config_file(
create_buckets,
save_model_as,
caption_extension,
use_8bit_adam,
# use_8bit_adam,
xformers,
clip_skip,
save_state,
@@ -263,7 +263,7 @@ def train_model(
generate_image_buckets,
save_model_as,
caption_extension,
use_8bit_adam,
# use_8bit_adam,
xformers,
clip_skip,
save_state,
@@ -429,7 +429,7 @@ def train_model(
gradient_checkpointing=gradient_checkpointing,
full_fp16=full_fp16,
xformers=xformers,
use_8bit_adam=use_8bit_adam,
# use_8bit_adam=use_8bit_adam,
keep_tokens=keep_tokens,
persistent_data_loader_workers=persistent_data_loader_workers,
bucket_no_upscale=bucket_no_upscale,
@@ -618,7 +618,7 @@ def finetune_tab():
label='Gradient accumulate steps', value='1'
)
(
use_8bit_adam,
# use_8bit_adam,
xformers,
full_fp16,
gradient_checkpointing,
@@ -646,11 +646,11 @@ def finetune_tab():
inputs=[color_aug],
outputs=[cache_latents], # Not applicable to fine_tune.py
)
optimizer.change(
set_legacy_8bitadam,
inputs=[optimizer, use_8bit_adam],
outputs=[optimizer, use_8bit_adam],
)
# optimizer.change(
# set_legacy_8bitadam,
# inputs=[optimizer, use_8bit_adam],
# outputs=[optimizer, use_8bit_adam],
# )

button_run = gr.Button('Train model', variant='primary')

@@ -699,7 +699,7 @@ def finetune_tab():
create_buckets,
save_model_as,
caption_extension,
use_8bit_adam,
# use_8bit_adam,
xformers,
clip_skip,
save_state,
95 changes: 21 additions & 74 deletions library/common_gui.py
@@ -11,9 +11,12 @@


def update_my_data(my_data):
if my_data.get('use_8bit_adam', False):
if my_data.get('use_8bit_adam', False) == True:
my_data['optimizer'] = 'AdamW8bit'
my_data['use_8bit_adam'] = False
# my_data['use_8bit_adam'] = False

if my_data.get('optimizer', 'missing') == 'missing' and my_data.get('use_8bit_adam', False) == False:
my_data['optimizer'] = 'AdamW'

if my_data.get('model_list', 'custom') == []:
print('Old config with empty model list. Setting to custom...')
@@ -92,17 +95,17 @@ def remove_doublequote(file_path):
return file_path


def set_legacy_8bitadam(optimizer, use_8bit_adam):
if optimizer == 'AdamW8bit':
# use_8bit_adam = True
return gr.Dropdown.update(value=optimizer), gr.Checkbox.update(
value=True, interactive=False, visible=True
)
else:
# use_8bit_adam = False
return gr.Dropdown.update(value=optimizer), gr.Checkbox.update(
value=False, interactive=False, visible=True
)
# def set_legacy_8bitadam(optimizer, use_8bit_adam):
# if optimizer == 'AdamW8bit':
# # use_8bit_adam = True
# return gr.Dropdown.update(value=optimizer), gr.Checkbox.update(
# value=True, interactive=False, visible=True
# )
# else:
# # use_8bit_adam = False
# return gr.Dropdown.update(value=optimizer), gr.Checkbox.update(
# value=False, interactive=False, visible=True
# )


def get_folder_path(folder_path=''):
@@ -584,30 +587,6 @@ def run_cmd_training(**kwargs):
return run_cmd


# # This function takes a dictionary of keyword arguments and returns a string that can be used to run a command-line training script
# def run_cmd_training(**kwargs):
# arg_map = {
# 'learning_rate': ' --learning_rate="{}"',
# 'lr_scheduler': ' --lr_scheduler="{}"',
# 'lr_warmup_steps': ' --lr_warmup_steps="{}"',
# 'train_batch_size': ' --train_batch_size="{}"',
# 'max_train_steps': ' --max_train_steps="{}"',
# 'save_every_n_epochs': ' --save_every_n_epochs="{}"',
# 'mixed_precision': ' --mixed_precision="{}"',
# 'save_precision': ' --save_precision="{}"',
# 'seed': ' --seed="{}"',
# 'caption_extension': ' --caption_extension="{}"',
# 'cache_latents': ' --cache_latents',
# 'optimizer': ' --use_lion_optimizer' if kwargs.get('optimizer') == 'Lion' else '',
# }

# options = [arg_map[key].format(value) for key, value in kwargs.items() if key in arg_map and value]

# cmd = ''.join(options)

# return cmd


def gradio_advanced_training():
with gr.Row():
keep_tokens = gr.Slider(
@@ -641,9 +620,9 @@ def gradio_advanced_training():
)
with gr.Row():
# This use_8bit_adam element should be removed in a future release as it is no longer used
use_8bit_adam = gr.Checkbox(
label='Use 8bit adam', value=False, visible=False
)
# use_8bit_adam = gr.Checkbox(
# label='Use 8bit adam', value=False, visible=False
# )
xformers = gr.Checkbox(label='Use xformers', value=True)
color_aug = gr.Checkbox(label='Color augmentation', value=False)
flip_aug = gr.Checkbox(label='Flip augmentation', value=False)
@@ -689,7 +668,7 @@ def gradio_advanced_training():
placeholder='(Optional) Override number of epoch. Default: 8',
)
return (
use_8bit_adam,
# use_8bit_adam,
xformers,
full_fp16,
gradient_checkpointing,
@@ -753,7 +732,7 @@ def run_cmd_advanced_training(**kwargs):
else '',
' --full_fp16' if kwargs.get('full_fp16') else '',
' --xformers' if kwargs.get('xformers') else '',
' --use_8bit_adam' if kwargs.get('use_8bit_adam') else '',
# ' --use_8bit_adam' if kwargs.get('use_8bit_adam') else '',
' --persistent_data_loader_workers'
if kwargs.get('persistent_data_loader_workers')
else '',
@@ -765,35 +744,3 @@ def run_cmd_advanced_training(**kwargs):
]
run_cmd = ''.join(options)
return run_cmd


# def run_cmd_advanced_training(**kwargs):
# arg_map = {
# 'max_train_epochs': ' --max_train_epochs="{}"',
# 'max_data_loader_n_workers': ' --max_data_loader_n_workers="{}"',
# 'max_token_length': ' --max_token_length={}' if int(kwargs.get('max_token_length', 75)) > 75 else '',
# 'clip_skip': ' --clip_skip={}' if int(kwargs.get('clip_skip', 1)) > 1 else '',
# 'resume': ' --resume="{}"',
# 'keep_tokens': ' --keep_tokens="{}"' if int(kwargs.get('keep_tokens', 0)) > 0 else '',
# 'caption_dropout_every_n_epochs': ' --caption_dropout_every_n_epochs="{}"' if int(kwargs.get('caption_dropout_every_n_epochs', 0)) > 0 else '',
# 'caption_dropout_rate': ' --caption_dropout_rate="{}"' if float(kwargs.get('caption_dropout_rate', 0)) > 0 else '',
# 'bucket_reso_steps': ' --bucket_reso_steps={:d}' if int(kwargs.get('bucket_reso_steps', 64)) >= 1 else '',
# 'save_state': ' --save_state',
# 'mem_eff_attn': ' --mem_eff_attn',
# 'color_aug': ' --color_aug',
# 'flip_aug': ' --flip_aug',
# 'shuffle_caption': ' --shuffle_caption',
# 'gradient_checkpointing': ' --gradient_checkpointing',
# 'full_fp16': ' --full_fp16',
# 'xformers': ' --xformers',
# 'use_8bit_adam': ' --use_8bit_adam',
# 'persistent_data_loader_workers': ' --persistent_data_loader_workers',
# 'bucket_no_upscale': ' --bucket_no_upscale',
# 'random_crop': ' --random_crop',
# }

# options = [arg_map[key].format(value) for key, value in kwargs.items() if key in arg_map and value]

# cmd = ''.join(options)

# return cmd
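With the checkbox gone, the Optimizer dropdown value is what ends up in the generated training command; run_cmd_advanced_training no longer emits the dedicated ' --use_8bit_adam' switch. A rough sketch of that mapping (hypothetical helper; it assumes the underlying sd-scripts trainer accepts an --optimizer_type argument, which is how the dropdown value is typically consumed):

    def build_optimizer_arg(optimizer: str) -> str:
        # Pass the dropdown value straight through instead of toggling a legacy flag.
        return f' --optimizer_type="{optimizer}"' if optimizer else ''

    # Example: a migrated legacy config ('use_8bit_adam': True) arrives here as 'AdamW8bit'.
    print(build_optimizer_arg('AdamW8bit'))  # --optimizer_type="AdamW8bit"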
24 changes: 12 additions & 12 deletions lora_gui.py
@@ -24,7 +24,7 @@
gradio_config,
gradio_source_model,
run_cmd_training,
set_legacy_8bitadam,
# set_legacy_8bitadam,
update_my_data,
)
from library.dreambooth_folder_creation_gui import (
@@ -77,7 +77,7 @@ def save_configuration(
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -188,7 +188,7 @@ def open_configuration(
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -285,7 +285,7 @@ def train_model(
full_fp16,
no_token_padding,
stop_text_encoder_training_pct,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
@@ -533,7 +533,7 @@ def train_model(
gradient_checkpointing=gradient_checkpointing,
full_fp16=full_fp16,
xformers=xformers,
use_8bit_adam=use_8bit_adam,
# use_8bit_adam=use_8bit_adam,
keep_tokens=keep_tokens,
persistent_data_loader_workers=persistent_data_loader_workers,
bucket_no_upscale=bucket_no_upscale,
@@ -793,7 +793,7 @@ def LoRA_type_change(LoRA_type):
placeholder='(Optional) For Cosine with restart and polynomial only',
)
(
use_8bit_adam,
# use_8bit_adam,
xformers,
full_fp16,
gradient_checkpointing,
@@ -822,11 +822,11 @@ def LoRA_type_change(LoRA_type):
outputs=[cache_latents],
)

optimizer.change(
set_legacy_8bitadam,
inputs=[optimizer, use_8bit_adam],
outputs=[optimizer, use_8bit_adam],
)
# optimizer.change(
# set_legacy_8bitadam,
# inputs=[optimizer, use_8bit_adam],
# outputs=[optimizer, use_8bit_adam],
# )

with gr.Tab('Tools'):
gr.Markdown(
@@ -885,7 +885,7 @@ def LoRA_type_change(LoRA_type):
full_fp16,
no_token_padding,
stop_text_encoder_training,
use_8bit_adam,
# use_8bit_adam,
xformers,
save_model_as,
shuffle_caption,
