Skip to content

Commit

Permalink
feat: brings webui.py up to date with comfy config options
Browse files Browse the repository at this point in the history
  • Loading branch information
tazlin committed May 14, 2023
1 parent 203329e commit 329cc43
Showing 1 changed file with 125 additions and 13 deletions.
138 changes: 125 additions & 13 deletions webui.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# webui.py
# Simple web configuration for horde worker
import argparse
import contextlib
import datetime
import glob
import math
Expand Down Expand Up @@ -49,10 +50,6 @@ class WebUI:
"label": "API Key",
"info": "This is your Stable Horde API Key. You can get one free at " "https://stablehorde.net/register ",
},
"low_vram_mode": {
"label": "Enable low vram mode",
"info": "It may help worker stability to disable this if you run multiple threads.",
},
"horde_url": {
"label": "The URL of the horde API server.",
"info": "Don't change this unless you know exactly what you are doing.",
Expand Down Expand Up @@ -96,7 +93,8 @@ class WebUI:
},
"cache_home": {
"label": "Model Directory",
"info": "Downloaded models files are stored here.",
"info": "Downloaded model files are stored here. The default './' means the AI-Horde-Worker "
"directory (check for a folder named 'nataili' after your first run).",
},
"temp_dir": {
"label": "Model Cache Directory",
Expand Down Expand Up @@ -125,6 +123,12 @@ class WebUI:
"This number includes system models such as the safety checker and the post-processors, so "
"don't set it too low!",
},
"alchemist_name": {
"label": "Alchemist Name",
"info": "This is the name of your Alchemist. It needs to be unique to the whole horde. "
"You cannot run different Alchemists with the same name. It will be publicly visible. "
"Defaults to worker_name if not specified.",
},
"forms": {
"label": "Alchemy Worker Features",
"info": "Enable or disable the different types of requests accepted by this worker if you"
Expand Down Expand Up @@ -178,14 +182,48 @@ class WebUI:
},
"special_models_to_load": {
"label": "Loading Groups of Models",
"info": "You can select groups of models here. 'All Models' loads all possible models. "
"info": "You can select groups of models here. 'All Models' loads all possible models "
"which will take over 500gb of space in the folder defined by the setting 'cache_home'. "
"The other options load different subsets of models based on style. You can select "
"more than one.",
},
"special_top_models_to_load": {
"label": "Automatically Loading Popular Models",
"info": "Choose to automatically load the top 'n' most popular models of the day.",
},
"ram_to_leave_free": {
"label": "RAM to Leave Free",
"info": "This is the amount of RAM to leave free for your system to use. You should raise this value "
"if you expect to run other programs on your computer while running your worker.",
},
"vram_to_leave_free": {
"label": "VRAM to Leave Free",
"info": "This is the amount of VRAM to leave free for your system to use. ",
},
"scribe_name": {
"label": "Scribe Name",
"info": "This is the name of your scribe worker. It needs to be unique to the whole horde. "
"You cannot run different workers with the same name. It will be publicly visible. "
"Defaults to worker_name if not specified",
},
"kai_url": {
"label": "Kai URL",
"info": "This is the URL of the Kobold AI Client API you want your worker to connect to. "
"You will probably be running your own Kobold AI Client, and you should enter the URL here.",
},
"max_length": {
"label": "Maximum Length",
"info": "This is the maximum number of tokens your worker will generate per request.",
},
"max_context_length": {
"label": "Maximum Context Length",
"info": "The max tokens to use from the prompt.",
},
"branded_model": {
"label": "Branded Model",
"info": "This will prevent the model from being used from the shared pool, but will ensure that"
" no other worker can pretend to serve it. If you are unsure, leave this as 'None'.",
},
}

_models_found_on_disk = None
Expand Down Expand Up @@ -266,6 +304,10 @@ def save_config(self, args):
donekeys.append(key)
elif cfgkey == "models_to_load":
models_to_load.extend(value)
donekeys.append(key)
elif cfgkey == "ram_to_leave_free" or cfgkey == "vram_to_leave_free":
config[cfgkey] = str(value) + "%"
donekeys.append(key)

# Merge the settings we have been passed into the old config,
# don't remove anything we don't understand
Expand Down Expand Up @@ -455,6 +497,17 @@ def initialise(self):
value=config.worker_name,
info=self._info("worker_name"),
)
alchemist_name = gr.Textbox(
label=self._label("alchemist_name"),
value=config.alchemist_name,
info=self._info("alchemist_name"),
)
scribe_name = gr.Textbox(
label=self._label("scribe_name"),
value=config.scribe_name,
info=self._info("scribe_name"),
)

api_key = gr.Textbox(
label=self._label("api_key"),
value=config.api_key,
Expand Down Expand Up @@ -667,6 +720,29 @@ def initialise(self):
value=config.queue_size,
info=self._info("queue_size"),
)
parsed_ram_from_config = 40
with contextlib.suppress(Exception):
parsed_ram_from_config = int(config.ram_to_leave_free.split("%")[0])
ram_to_leave_free = gr.Slider(
0,
100,
step=1,
label=self._label("ram_to_leave_free"),
value=parsed_ram_from_config,
info=self._info("ram_to_leave_free"),
)
parsed_vram_from_config = 40
with contextlib.suppress(Exception):
parsed_vram_from_config = int(config.vram_to_leave_free.split("%")[0])

vram_to_leave_free = gr.Slider(
0,
100,
step=1,
label=self._label("vram_to_leave_free"),
value=parsed_vram_from_config,
info=self._info("vram_to_leave_free"),
)

with gr.Tab("Advanced"), gr.Column():
config.default("enable_terminal_ui", False)
Expand All @@ -681,12 +757,6 @@ def initialise(self):
value=config.horde_url,
info=self._info("horde_url"),
)
config.default("low_vram_mode", True)
low_vram_mode = gr.Checkbox(
label=self._label("low_vram_mode"),
value=config.low_vram_mode,
info=self._info("low_vram_mode"),
)
config.default("stats_output_frequency", 30)
stats_output_frequency = gr.Number(
label=self._label("stats_output_frequency"),
Expand All @@ -713,6 +783,41 @@ def initialise(self):
inputs=[worker_name, worker_id, maintenance_mode, api_key],
outputs=[maint_message],
)
with gr.Tab("Scribe Options"), gr.Column():
gr.Markdown(
"Options for the Scribes (text workers)",
)
config.default("kai_url", "http://localhost:5000")
kai_url = gr.Textbox(
label=self._label("kai_url"),
value=config.kai_url,
info=self._info("kai_url"),
)

config.default("max_length", 80)
max_length = gr.Slider(
0,
240,
step=10,
label=self._label("max_length"),
value=config.max_length,
info=self._info("max_length"),
)
config.default("max_context_length", 1024)
max_context_length = gr.Slider(
0,
8192,
step=128,
label=self._label("max_context_length"),
value=config.max_context_length,
info=self._info("max_context_length"),
)
config.default("branded_model", False)
branded_model = gr.Checkbox(
label=self._label("branded_model"),
value=config.branded_model,
info=self._info("branded_model"),
)

with gr.Row():
submit = gr.Button(value="Save Configuration", variant="primary")
Expand All @@ -722,6 +827,7 @@ def initialise(self):
submit.click(
self.save_config,
inputs={
alchemist_name,
allow_controlnet,
allow_img2img,
allow_painting,
Expand All @@ -736,7 +842,6 @@ def initialise(self):
enable_terminal_ui,
forms,
horde_url,
low_vram_mode,
max_models_to_download,
max_power,
max_threads,
Expand All @@ -754,6 +859,13 @@ def initialise(self):
special_top_models_to_load,
stats_output_frequency,
worker_name,
ram_to_leave_free,
vram_to_leave_free,
scribe_name,
kai_url,
max_length,
max_context_length,
branded_model,
},
outputs=[message],
)
Expand Down

0 comments on commit 329cc43

Please sign in to comment.