Commit: Fix various typos
calvinballing committed Dec 15, 2022
1 parent 685f963 commit c0355ca
Showing 16 changed files with 68 additions and 68 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -82,8 +82,8 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Use VAEs
- Estimated completion time in progress bar
- API
-- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
-- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
+- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
+- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions

## Installation and Running
24 changes: 12 additions & 12 deletions javascript/contextMenus.js
@@ -9,7 +9,7 @@ contextMenuInit = function(){

function showContextMenu(event,element,menuEntries){
let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
-let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
+let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;

let oldMenu = gradioApp().querySelector('#context-menu')
if(oldMenu){
@@ -61,15 +61,15 @@ contextMenuInit = function(){

}

-function appendContextMenuOption(targetEmementSelector,entryName,entryFunction){
+function appendContextMenuOption(targetElementSelector,entryName,entryFunction){

-currentItems = menuSpecs.get(targetEmementSelector)
+currentItems = menuSpecs.get(targetElementSelector)

if(!currentItems){
currentItems = []
-menuSpecs.set(targetEmementSelector,currentItems);
+menuSpecs.set(targetElementSelector,currentItems);
}
-let newItem = {'id':targetEmementSelector+'_'+uid(),
+let newItem = {'id':targetElementSelector+'_'+uid(),
'name':entryName,
'func':entryFunction,
'isNew':true}
@@ -97,7 +97,7 @@ contextMenuInit = function(){
if(source.id && source.id.indexOf('check_progress')>-1){
return
}

let oldMenu = gradioApp().querySelector('#context-menu')
if(oldMenu){
oldMenu.remove()
Expand All @@ -117,7 +117,7 @@ contextMenuInit = function(){
})
});
eventListenerApplied=true

}

return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]
@@ -152,8 +152,8 @@ addContextMenuEventListener = initResponse[2];
generateOnRepeat('#img2img_generate','#img2img_interrupt');
})

-let cancelGenerateForever = function(){
-clearInterval(window.generateOnRepeatInterval)
+let cancelGenerateForever = function(){
+clearInterval(window.generateOnRepeatInterval)
}

appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
Expand All @@ -162,7 +162,7 @@ addContextMenuEventListener = initResponse[2];
appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever)

appendContextMenuOption('#roll','Roll three',
-function(){
+function(){
let rollbutton = get_uiCurrentTabContent().querySelector('#roll');
setTimeout(function(){rollbutton.click()},100)
setTimeout(function(){rollbutton.click()},200)
12 changes: 6 additions & 6 deletions javascript/progressbar.js
@@ -3,7 +3,7 @@ global_progressbars = {}
galleries = {}
galleryObservers = {}

-// this tracks laumnches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
+// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
timeoutIds = {}

function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
Expand All @@ -20,21 +20,21 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip

var skip = id_skip ? gradioApp().getElementById(id_skip) : null
var interrupt = gradioApp().getElementById(id_interrupt)

if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
if(progressbar.innerText){
let newtitle = '[' + progressbar.innerText.trim() + '] Stable Diffusion';
if(document.title != newtitle){
-document.title = newtitle;
+document.title = newtitle;
}
}else{
let newtitle = 'Stable Diffusion'
if(document.title != newtitle){
-document.title = newtitle;
+document.title = newtitle;
}
}
}

if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){
global_progressbars[id_progressbar] = progressbar

@@ -63,7 +63,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
skip.style.display = "none"
}
interrupt.style.display = "none"

//disconnect observer once generation finished, so user can close selected image if they want
if (galleryObservers[id_gallery]) {
galleryObservers[id_gallery].disconnect();
2 changes: 1 addition & 1 deletion javascript/ui.js
@@ -100,7 +100,7 @@ function create_submit_args(args){

// As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image.
// This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate.
-// I don't know why gradio is seding outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some.
+// I don't know why gradio is sending outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some.
// If gradio at some point stops sending outputs, this may break something
if(Array.isArray(res[res.length - 3])){
res[res.length - 3] = null
18 changes: 9 additions & 9 deletions modules/api/api.py
@@ -67,10 +67,10 @@ def encode_pil_to_base64(image):
class Api:
def __init__(self, app: FastAPI, queue_lock: Lock):
if shared.cmd_opts.api_auth:
-self.credenticals = dict()
+self.credentials = dict()
for auth in shared.cmd_opts.api_auth.split(","):
user, password = auth.split(":")
-self.credenticals[user] = password
+self.credentials[user] = password

self.router = APIRouter()
self.app = app
Expand All @@ -93,7 +93,7 @@ def __init__(self, app: FastAPI, queue_lock: Lock):
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
self.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])

Expand All @@ -102,9 +102,9 @@ def add_api_route(self, path: str, endpoint, **kwargs):
return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
return self.app.add_api_route(path, endpoint, **kwargs)

-def auth(self, credenticals: HTTPBasicCredentials = Depends(HTTPBasic())):
-if credenticals.username in self.credenticals:
-if compare_digest(credenticals.password, self.credenticals[credenticals.username]):
+def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
+if credentials.username in self.credentials:
+if compare_digest(credentials.password, self.credentials[credentials.username]):
return True

raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
@@ -239,7 +239,7 @@ def progressapi(self, req: ProgressRequest = Depends()):
def interrogateapi(self, interrogatereq: InterrogateRequest):
image_b64 = interrogatereq.image
if image_b64 is None:
-raise HTTPException(status_code=404, detail="Image not found")
+raise HTTPException(status_code=404, detail="Image not found")

img = decode_base64_to_image(image_b64)
img = img.convert('RGB')
Expand All @@ -252,7 +252,7 @@ def interrogateapi(self, interrogatereq: InterrogateRequest):
processed = deepbooru.model.tag(img)
else:
raise HTTPException(status_code=404, detail="Model not found")

return InterrogateResponse(caption=processed)

def interruptapi(self):
@@ -308,7 +308,7 @@ def get_face_restorers(self):
def get_realesrgan_models(self):
return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]

-def get_promp_styles(self):
+def get_prompt_styles(self):
styleList = []
for k in shared.prompt_styles.styles:
style = shared.prompt_styles.styles[k]
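For orientation, here is a minimal client-side sketch of the authenticated route above. It assumes the server was launched with API auth enabled in the `user:password` form that the `auth.split(":")` parsing expects; the local URL and port are assumptions, not part of this commit.

```python
import requests

# Assumed local instance; the credentials must match one of the
# user:password pairs parsed into self.credentials in __init__ above.
BASE_URL = "http://127.0.0.1:7860"

# HTTP Basic credentials are verified by Api.auth() via compare_digest.
resp = requests.get(f"{BASE_URL}/sdapi/v1/prompt-styles",
                    auth=("user", "password"))
resp.raise_for_status()  # a 401 here means auth() rejected the credentials

for style in resp.json():  # List[PromptStyleItem]
    print(style)
```

A wrong password produces the 401 response raised at the end of `auth()`.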
2 changes: 1 addition & 1 deletion modules/api/models.py
@@ -128,7 +128,7 @@ class ExtrasBaseRequest(BaseModel):
upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
-upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?")
+upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
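To show how these `ExtrasBaseRequest` fields fit together, a hedged sketch of a request body; the `/sdapi/v1/extra-single-image` route and the `image` field come from the surrounding API code rather than this diff, so treat them as assumptions.

```python
import base64
import requests

# Encode an input image as base64, as the extras endpoint expects.
with open("input.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

payload = {
    "resize_mode": 0,        # 0: scale by factor, 1: scale to exact width/height
    "upscaling_resize": 2,   # upscaling factor, only used when resize_mode=0
    "upscaling_crop": True,  # crop to fit the chosen size
    "upscaler_1": "None",    # must be a name from the sd_upscalers list
    "image": image_b64,      # assumed field name, not shown in this diff
}

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/extra-single-image", json=payload)
print(resp.status_code)
```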
4 changes: 2 additions & 2 deletions modules/images.py
@@ -429,7 +429,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
-seed, prompt, short_filename,
+seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pngsectionname (`str`):
@@ -590,7 +590,7 @@ def read_info_from_image(image):
Negative prompt: {json_info["uc"]}
Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
except Exception:
print(f"Error parsing NovelAI iamge generation parameters:", file=sys.stderr)
print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)

return geninfo, items
14 changes: 7 additions & 7 deletions modules/processing.py
@@ -147,11 +147,11 @@ def txt2img_image_conditioning(self, x, width=None, height=None):

# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
-image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
+image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))

# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-image_conditioning = image_conditioning.to(x.dtype)
+image_conditioning = image_conditioning.to(x.dtype)

return image_conditioning

@@ -199,7 +199,7 @@ def inpainting_image_conditioning(self, source_image, latent_image, image_mask =
source_image * (1.0 - conditioning_mask),
getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
)

# Encode the new masked image using first stage of network.
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))

@@ -537,7 +537,7 @@ def infotext(iteration=0, position_in_batch=0):
for n in range(p.n_iter):
if state.skipped:
state.skipped = False

if state.interrupted:
break

@@ -612,7 +612,7 @@ def infotext(iteration=0, position_in_batch=0):
image.info["parameters"] = text
output_images.append(image)

-del x_samples_ddim
+del x_samples_ddim

devices.torch_gc()

@@ -704,7 +704,7 @@ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subs

samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]

"""saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
"""saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
def save_intermediate(image, index):
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
Expand All @@ -720,7 +720,7 @@ def save_intermediate(image, index):

samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")

-# Avoid making the inpainting conditioning unless necessary as
+# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
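To make the txt2img conditioning comments concrete, a small standalone sketch of the same `torch.nn.functional.pad` trick: a fake all-ones mask channel is prepended to a latent-like tensor, mirroring the `(0, 0, 0, 0, 1, 0)` padding above. The shapes are illustrative only, not the model's real ones.

```python
import torch

# Latent-like batch: (batch, channels, height, width).
latent = torch.zeros(1, 4, 64, 64)

# pad() takes pairs per dimension starting from the last:
# (w_left, w_right, h_top, h_bottom, c_front, c_back).
# Padding the channel dimension at the front with value=1.0 prepends
# the "fake full 1s mask" described in the comment.
conditioned = torch.nn.functional.pad(latent, (0, 0, 0, 0, 1, 0), value=1.0)

print(conditioned.shape)        # torch.Size([1, 5, 64, 64])
print(conditioned[0, 0].min())  # tensor(1.), the all-ones mask channel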
4 changes: 2 additions & 2 deletions modules/safe.py
@@ -80,7 +80,7 @@ def check_pt(filename, extra_handler):
# new pytorch format is a zip file
with zipfile.ZipFile(filename) as z:
check_zip_filenames(filename, z.namelist())

# find filename of data.pkl in zip file: '<directory name>/data.pkl'
data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)]
if len(data_pkl_filenames) == 0:
@@ -108,7 +108,7 @@ def load(filename, *args, **kwargs):

def load_with_extra(filename, extra_handler=None, *args, **kwargs):
"""
-this functon is intended to be used by extensions that want to load models with
+this function is intended to be used by extensions that want to load models with
some extra classes in them that the usual unpickler would find suspicious.
Use the extra_handler argument to specify a function that takes module and field name as text,
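A hedged sketch of the `extra_handler` contract described in that docstring: the handler receives the module and field name as text and returns a replacement class, or None to fall back to the default checks. The `collections.OrderedDict` allow-list entry is only an example.

```python
import collections
from modules import safe  # assumed import path for an extension

def extra(module, name):
    # Allow exactly one extra class; anything else returns None and
    # goes through the usual suspicious-class checks.
    if module == 'collections' and name == 'OrderedDict':
        return collections.OrderedDict
    return None

model = safe.load_with_extra('model.pt', extra_handler=extra)
```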
4 changes: 2 additions & 2 deletions modules/scripts.py
@@ -36,7 +36,7 @@ def title(self):
def ui(self, is_img2img):
"""this function should create gradio UI elements. See https://gradio.app/docs/#components
The return value should be an array of all components that are used in processing.
-Values of those returned componenbts will be passed to run() and process() functions.
+Values of those returned components will be passed to run() and process() functions.
"""

pass
Expand All @@ -47,7 +47,7 @@ def show(self, is_img2img):
This function should return:
- False if the script should not be shown in UI at all
-- True if the script should be shown in UI if it's scelected in the scripts drowpdown
+- True if the script should be shown in UI if it's selected in the scripts dropdown
- script.AlwaysVisible if the script should be shown in UI at all times
"""

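Reading the `ui()` and `show()` docstrings together, a minimal sketch of a custom script; the slider and its label are placeholders, not part of this commit.

```python
import gradio as gr
import modules.scripts as scripts

class ExampleScript(scripts.Script):
    def title(self):
        return "Example script"

    def ui(self, is_img2img):
        # Values of the returned components are passed to run()/process().
        strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Strength")
        return [strength]

    def show(self, is_img2img):
        # True: shown when selected in the scripts dropdown;
        # scripts.AlwaysVisible would keep it visible at all times.
        return True

    def run(self, p, strength):
        # p is the processing object; strength arrives from ui() above.
        pass
```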
6 changes: 3 additions & 3 deletions modules/sd_hijack_inpainting.py
@@ -209,7 +209,7 @@ def get_model_output(x, t):
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)

if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
@@ -278,7 +278,7 @@ def get_x_prev_and_pred_x0(e_t, index):
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)

return x_prev, pred_x0, e_t

# =================================================================================================
# Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config.
# Adapted from:
@@ -326,7 +326,7 @@ def do_inpainting_hijack():
# most of this stuff seems to no longer be needed because it is already included into SD2.0
# LatentInpaintDiffusion remains because SD2.0's LatentInpaintDiffusion can't be loaded without specifying a checkpoint
# p_sample_plms is needed because PLMS can't work with dicts as conditionings
-# this file should be cleaned up later if weverything tuens out to work fine
+# this file should be cleaned up later if everything turns out to work fine

# ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning
ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion
2 changes: 1 addition & 1 deletion modules/sd_hijack_unet.py
@@ -4,7 +4,7 @@
class TorchHijackForUnet:
"""
This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match;
-this makes it possible to create pictures with dimensions that are muliples of 8 rather than 64
+this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64
"""

def __getattr__(self, item):
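A rough sketch of the idea in that docstring, not the class's actual implementation: a `cat` wrapper that interpolates mismatched spatial sizes before concatenating, which is what lets U-Net skip connections line up when dimensions are multiples of 8 rather than 64.

```python
import torch

def resizing_cat(tensors, dim=1):
    # Sketch only: snap every tensor to the first one's spatial size
    # before concatenating, so slightly mismatched skip connections
    # still line up.
    target = tensors[0].shape[-2:]
    resized = [
        t if t.shape[-2:] == target
        else torch.nn.functional.interpolate(t, size=target, mode="nearest")
        for t in tensors
    ]
    return torch.cat(resized, dim=dim)

a = torch.randn(1, 4, 20, 20)
b = torch.randn(1, 4, 19, 19)      # off-by-one spatial size
print(resizing_cat([a, b]).shape)  # torch.Size([1, 8, 20, 20])
```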