
Commit 83f619a

actually just fix every single casing mixup

1 parent 9fe47f4 · commit 83f619a

4 files changed (+55, -55 lines)


dynthres_comfyui.py

Lines changed: 2 additions & 2 deletions

@@ -63,13 +63,13 @@ def patch(self, model, mimic_scale, threshold_percentile):
         dynamic_thresh = DynThresh(mimic_scale, threshold_percentile, "CONSTANT", 0, "CONSTANT", 0, 0, 0, 999, False, "MEAN", "AD", 1)

         def sampler_dyn_thrash(args):
-            x_out = args["cond"]
+            cond = args["cond"]
             uncond = args["uncond"]
             cond_scale = args["cond_scale"]
             time_step = args["timestep"]
             dynamic_thresh.step = 999 - time_step[0]

-            return dynamic_thresh.dynthresh(x_out, uncond, cond_scale, None)
+            return dynamic_thresh.dynthresh(cond, uncond, cond_scale, None)

         m = model.clone()
         m.set_model_sampler_cfg_function(sampler_dyn_thrash)
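
For orientation (not part of the commit): the function registered through m.set_model_sampler_cfg_function receives the args dict read above and returns the combined noise prediction. A minimal sketch, assuming only the keys this diff already uses, of the plain classifier-free guidance combine that a hook like sampler_dyn_thrash stands in for:

# Sketch only; illustrates the default CFG combine that this patch replaces with dynthresh.
def plain_cfg(args):
    cond = args["cond"]              # conditioned noise prediction
    uncond = args["uncond"]          # unconditioned noise prediction
    cond_scale = args["cond_scale"]  # the user's CFG scale
    return uncond + (cond - uncond) * cond_scale

DynThresh takes the same inputs but limits how far the high-scale result may stray from a tamer "mimic" scale.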

dynthres_core.py

Lines changed: 22 additions & 22 deletions

@@ -23,7 +23,7 @@ def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_mi
         self.variability_measure = variability_measure
         self.interpolate_phi = interpolate_phi

-    def interpretScale(self, scale, mode, min):
+    def interpret_scale(self, scale, mode, min):
         scale -= min
         max = self.max_steps - 1
         frac = self.step / max
@@ -56,8 +56,8 @@ def interpretScale(self, scale, mode, min):
         return scale

     def dynthresh(self, cond, uncond, cfg_scale, weights):
-        mimic_scale = self.interpretScale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)
-        cfg_scale = self.interpretScale(cfg_scale, self.cfg_mode, self.cfg_scale_min)
+        mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)
+        cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)
         # uncond shape is (batch, 4, height, width)
         conds_per_batch = cond.shape[0] / uncond.shape[0]
         assert conds_per_batch == int(conds_per_batch), "Expected # of conds per batch to be constant across batches"
@@ -116,13 +116,13 @@ def dynthresh(self, cond, uncond, cfg_scale, weights):
         ### Now add it back onto the averages to get into real scale again and return
         result = cfg_renormalized + cfg_means

-        actualRes = result.unflatten(2, mim_target.shape[2:])
+        actual_res = result.unflatten(2, mim_target.shape[2:])

         if self.interpolate_phi != 1.0:
-            actualRes = actualRes * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)
+            actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)

         if self.experiment_mode == 1:
-            num = actualRes.cpu().numpy()
+            num = actual_res.cpu().numpy()
             for y in range(0, 64):
                 for x in range (0, 64):
                     if num[0][0][y][x] > 1.0:
@@ -131,19 +131,19 @@ def dynthresh(self, cond, uncond, cfg_scale, weights):
                         num[0][1][y][x] *= 0.5
                     if num[0][2][y][x] > 1.5:
                         num[0][2][y][x] *= 0.5
-            actualRes = torch.from_numpy(num).to(device=uncond.device)
+            actual_res = torch.from_numpy(num).to(device=uncond.device)
         elif self.experiment_mode == 2:
-            num = actualRes.cpu().numpy()
+            num = actual_res.cpu().numpy()
             for y in range(0, 64):
                 for x in range (0, 64):
-                    overScale = False
+                    over_scale = False
                     for z in range(0, 4):
                         if abs(num[0][z][y][x]) > 1.5:
-                            overScale = True
-                    if overScale:
+                            over_scale = True
+                    if over_scale:
                         for z in range(0, 4):
                             num[0][z][y][x] *= 0.7
-            actualRes = torch.from_numpy(num).to(device=uncond.device)
+            actual_res = torch.from_numpy(num).to(device=uncond.device)
         elif self.experiment_mode == 3:
             coefs = torch.tensor([
                 # R G B W
@@ -152,16 +152,16 @@ def dynthresh(self, cond, uncond, cfg_scale, weights):
                 [-0.158, 0.189, 0.264, 0.0], # L3
                 [-0.184, -0.271, -0.473, 1.0], # L4
             ], device=uncond.device)
-            resRGB = torch.einsum("laxy,ab -> lbxy", actualRes, coefs)
-            maxR, maxG, maxB, maxW = resRGB[0][0].max(), resRGB[0][1].max(), resRGB[0][2].max(), resRGB[0][3].max()
-            maxRGB = max(maxR, maxG, maxB)
-            print(f"test max = r={maxR}, g={maxG}, b={maxB}, w={maxW}, rgb={maxRGB}")
+            res_rgb = torch.einsum("laxy,ab -> lbxy", actual_res, coefs)
+            max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()
+            max_rgb = max(max_r, max_g, max_b)
+            print(f"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}")
             if self.step / (self.max_steps - 1) > 0.2:
-                if maxRGB < 2.0 and maxW < 3.0:
-                    resRGB /= maxRGB / 2.4
+                if max_rgb < 2.0 and max_w < 3.0:
+                    res_rgb /= max_rgb / 2.4
             else:
-                if maxRGB > 2.4 and maxW > 3.0:
-                    resRGB /= maxRGB / 2.4
-            actualRes = torch.einsum("laxy,ab -> lbxy", resRGB, coefs.inverse())
+                if max_rgb > 2.4 and max_w > 3.0:
+                    res_rgb /= max_rgb / 2.4
+            actual_res = torch.einsum("laxy,ab -> lbxy", res_rgb, coefs.inverse())

-        return actualRes
+        return actual_res
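
The hunks above only rename identifiers; the body of dynthresh between them is unchanged and mostly elided here. As rough context, this is a heavily simplified sketch of the percentile-clamping idea the method implements, written as an assumption rather than a copy of the extension's code (the real version also handles multiple conds per batch, separate feature channels, the scaling startpoint, the variability measure, interpolate_phi, and the experiment modes shown above):

import torch

def dynthresh_sketch(cond, uncond, cfg_scale, mimic_scale, threshold_percentile=0.95):
    # Build the real (high-scale) CFG target and a tamer "mimic" target from the same pair.
    cfg_target = uncond + (cond - uncond) * cfg_scale
    mim_target = uncond + (cond - uncond) * mimic_scale
    # Flatten the spatial dims and center each channel on its mean.
    cfg_flat, mim_flat = cfg_target.flatten(2), mim_target.flatten(2)
    cfg_means = cfg_flat.mean(dim=2, keepdim=True)
    mim_means = mim_flat.mean(dim=2, keepdim=True)
    cfg_centered, mim_centered = cfg_flat - cfg_means, mim_flat - mim_means
    # The mimic output's peak magnitude is the budget; the CFG output's chosen
    # percentile decides what gets squeezed down to fit that budget.
    mim_ref = mim_centered.abs().amax(dim=2, keepdim=True)
    cfg_ref = torch.quantile(cfg_centered.abs(), threshold_percentile, dim=2, keepdim=True)
    ref = torch.maximum(cfg_ref, mim_ref)
    clamped = cfg_centered.clamp(-ref, ref)
    rescaled = clamped / ref * mim_ref
    # Shift back onto the means and restore the original spatial shape.
    return (rescaled + cfg_means).unflatten(2, cfg_target.shape[2:])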

dynthres_unipc.py

Lines changed: 6 additions & 6 deletions

@@ -16,7 +16,7 @@
 # (It has hooks but not in useful locations)
 # I stripped the original comments for brevity.
 # Some never-used code (scheduler modes, noise modes, guidance modes) have been removed as well for brevity.
-# The actual impl comes down to just the last line in particular, and the `beforeSample` insert to track step count.
+# The actual impl comes down to just the last line in particular, and the `before_sample` insert to track step count.

 class CustomUniPCSampler(uni_pc.sampler.UniPCSampler):
     def __init__(self, model, **kwargs):
@@ -50,16 +50,16 @@ def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals
         img = x_T
         ns = uni_pc.uni_pc.NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
         model_type = "v" if self.model.parameterization == "v" else "noise"
-        model_fn = CustomUniPC_model_wrapper(lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type=model_type, guidance_scale=unconditional_guidance_scale, dtData=self.main_class)
+        model_fn = CustomUniPC_model_wrapper(lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type=model_type, guidance_scale=unconditional_guidance_scale, dt_data=self.main_class)
         self.main_class.step = 0
-        def beforeSample(x, t, cond, uncond):
+        def before_sample(x, t, cond, uncond):
             self.main_class.step += 1
             return self.before_sample(x, t, cond, uncond)
-        uni_pc_inst = uni_pc.uni_pc.UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=beforeSample, after_sample=self.after_sample, after_update=self.after_update)
+        uni_pc_inst = uni_pc.uni_pc.UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=before_sample, after_sample=self.after_sample, after_update=self.after_update)
         x = uni_pc_inst.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
         return x.to(device), None

-def CustomUniPC_model_wrapper(model, noise_schedule, model_type="noise", model_kwargs={}, guidance_scale=1.0, dtData=None):
+def CustomUniPC_model_wrapper(model, noise_schedule, model_type="noise", model_kwargs={}, guidance_scale=1.0, dt_data=None):
     def expand_dims(v, dims):
         return v[(...,) + (None,)*(dims - 1)]
     def get_model_input_time(t_continuous):
@@ -107,5 +107,5 @@ def model_fn(x, t_continuous, condition, unconditional_condition):
         c_in = torch.cat([unconditional_condition, condition])
         noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
         #return noise_uncond + guidance_scale * (noise - noise_uncond)
-        return dtData.dynthresh(noise, noise_uncond, guidance_scale, None)
+        return dt_data.dynthresh(noise, noise_uncond, guidance_scale, None)
     return model_fn
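
One note on why before_sample matters beyond the rename: interpret_scale in dynthres_core.py schedules the mimic and CFG scales from self.step / (max_steps - 1), so something has to advance that counter once per sampler call. A generic sketch of the same wrap-a-callback pattern, with hypothetical names not taken from the extension:

# Illustrative only; tracker and wrapped_callback are placeholders.
def count_steps(tracker, wrapped_callback):
    def before_sample(x, t, cond, uncond):
        tracker.step += 1  # schedule modes read tracker.step elsewhere
        return wrapped_callback(x, t, cond, uncond)
    return before_sample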

scripts/dynamic_thresholding.py

Lines changed: 25 additions & 25 deletions

@@ -39,8 +39,8 @@ def show(self, is_img2img):
         return scripts.AlwaysVisible

     def ui(self, is_img2img):
-        def vis_change(isVis):
-            return {"visible": isVis, "__type__": "update"}
+        def vis_change(is_vis):
+            return {"visible": is_vis, "__type__": "update"}
         # "Dynamic Thresholding (CFG Scale Fix)"
         dtrue = gr.Checkbox(value=True, visible=False)
         dfalse = gr.Checkbox(value=False, visible=False)
@@ -64,11 +64,11 @@ def vis_change(isVis):
             separate_feature_channels = gr.Checkbox(value=True, label="Separate Feature Channels", elem_id='dynthres_separate_feature_channels')
             scaling_startpoint = gr.Radio(["ZERO", "MEAN"], value="MEAN", label="Scaling Startpoint")
             variability_measure = gr.Radio(["STD", "AD"], value="AD", label="Variability Measure")
-        def shouldShowSchedulerValue(cfgMode, mimicMode):
-            sched_vis = cfgMode in MODES_WITH_VALUE or mimicMode in MODES_WITH_VALUE
-            return vis_change(sched_vis), vis_change(mimicMode != "Constant"), vis_change(cfgMode != "Constant")
-        cfg_mode.change(shouldShowSchedulerValue, inputs=[cfg_mode, mimic_mode], outputs=[sched_val, mimic_scale_min, cfg_scale_min])
-        mimic_mode.change(shouldShowSchedulerValue, inputs=[cfg_mode, mimic_mode], outputs=[sched_val, mimic_scale_min, cfg_scale_min])
+        def should_show_scheduler_value(cfg_mode, mimic_mode):
+            sched_vis = cfg_mode in MODES_WITH_VALUE or mimic_mode in MODES_WITH_VALUE
+            return vis_change(sched_vis), vis_change(mimic_mode != "Constant"), vis_change(cfg_mode != "Constant")
+        cfg_mode.change(should_show_scheduler_value, inputs=[cfg_mode, mimic_mode], outputs=[sched_val, mimic_scale_min, cfg_scale_min])
+        mimic_mode.change(should_show_scheduler_value, inputs=[cfg_mode, mimic_mode], outputs=[sched_val, mimic_scale_min, cfg_scale_min])
         enabled.change(
             _js="dynthres_update_enabled",
             fn=lambda x, y: {"visible": x, "__type__": "update"},
@@ -143,34 +143,34 @@ def make_sampler(orig_sampler_name):

            # Make a placeholder sampler
            sampler = sd_samplers.all_samplers_map[orig_sampler_name]
-            dtData = dynthres_core.DynThresh(mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, p.steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi)
+            dt_data = dynthres_core.DynThresh(mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, p.steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi)
            if orig_sampler_name == "UniPC":
-                def uniPCConstructor(model):
-                    return CustomVanillaSDSampler(dynthres_unipc.CustomUniPCSampler, model, dtData)
-                newSampler = sd_samplers_common.SamplerData(fixed_sampler_name, uniPCConstructor, sampler.aliases, sampler.options)
+                def unipc_constructor(model):
+                    return CustomVanillaSDSampler(dynthres_unipc.CustomUniPCSampler, model, dt_data)
+                new_sampler = sd_samplers_common.SamplerData(fixed_sampler_name, unipc_constructor, sampler.aliases, sampler.options)
            else:
-                def newConstructor(model):
+                def new_constructor(model):
                    result = sampler.constructor(model)
-                    cfg = CustomCFGDenoiser(result if IS_AUTO_16 else result.model_wrap_cfg.inner_model, dtData)
+                    cfg = CustomCFGDenoiser(result if IS_AUTO_16 else result.model_wrap_cfg.inner_model, dt_data)
                    result.model_wrap_cfg = cfg
                    return result
-                newSampler = sd_samplers_common.SamplerData(fixed_sampler_name, newConstructor, sampler.aliases, sampler.options)
-            return fixed_sampler_name, newSampler
+                new_sampler = sd_samplers_common.SamplerData(fixed_sampler_name, new_constructor, sampler.aliases, sampler.options)
+            return fixed_sampler_name, new_sampler

        # Apply for usage
        p.orig_sampler_name = orig_sampler_name
        p.orig_latent_sampler_name = orig_latent_sampler_name
        p.fixed_samplers = []

        if orig_latent_sampler_name:
-            latent_sampler_name, latentSampler = make_sampler(orig_latent_sampler_name)
-            sd_samplers.all_samplers_map[latent_sampler_name] = latentSampler
+            latent_sampler_name, latent_sampler = make_sampler(orig_latent_sampler_name)
+            sd_samplers.all_samplers_map[latent_sampler_name] = latent_sampler
            p.fixed_samplers.append(latent_sampler_name)
            p.latent_sampler = latent_sampler_name

        if orig_sampler_name != orig_latent_sampler_name:
-            p.sampler_name, newSampler = make_sampler(orig_sampler_name)
-            sd_samplers.all_samplers_map[p.sampler_name] = newSampler
+            p.sampler_name, new_sampler = make_sampler(orig_sampler_name)
+            sd_samplers.all_samplers_map[p.sampler_name] = new_sampler
            p.fixed_samplers.append(p.sampler_name)
        else:
            p.sampler_name = p.latent_sampler
@@ -193,16 +193,16 @@ def postprocess_batch(self, p, enabled, mimic_scale, threshold_percentile, mimic
 ######################### CompVis Implementation logic #########################

 class CustomVanillaSDSampler(sd_samplers_compvis.VanillaStableDiffusionSampler):
-    def __init__(self, constructor, sd_model, dtData):
+    def __init__(self, constructor, sd_model, dt_data):
         super().__init__(constructor, sd_model)
-        self.sampler.main_class = dtData
+        self.sampler.main_class = dt_data

 ######################### K-Diffusion Implementation logic #########################

 class CustomCFGDenoiser(cfgdenoisekdiff):
-    def __init__(self, model, dtData):
+    def __init__(self, model, dt_data):
         super().__init__(model)
-        self.main_class = dtData
+        self.main_class = dt_data

     def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
         if isinstance(uncond, dict) and 'crossattn' in uncond:
@@ -258,11 +258,11 @@ def confirm_scheduler(p, xs):
     if not any("[DynThres]" in x.label for x in xyz_grid.axis_options):
         xyz_grid.axis_options.extend(extra_axis_options)

-def callbackBeforeUi():
+def callback_before_ui():
     try:
         make_axis_options()
     except Exception as e:
         traceback.print_exc()
         print(f"Failed to add support for X/Y/Z Plot Script because: {e}")

-script_callbacks.on_before_ui(callbackBeforeUi)
+script_callbacks.on_before_ui(callback_before_ui)
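
The ui() hunks are pure renames, but they touch a reusable Gradio pattern: a control's .change() handler returns update dicts that show or hide other widgets. A stand-alone sketch of that pattern under assumed, simplified mode names (not the extension's actual option lists):

import gradio as gr

# Hypothetical demo of the visibility-toggle pattern used by vis_change above.
MODES_WITH_VALUE = ["Power Up", "Power Down"]  # illustrative subset only

def vis_change(is_vis):
    return {"visible": is_vis, "__type__": "update"}

with gr.Blocks() as demo:
    mode = gr.Dropdown(["Constant", "Linear Down", "Power Up"], value="Constant", label="Mode")
    sched_val = gr.Slider(0.0, 40.0, value=4.0, label="Scheduler Value", visible=False)
    scale_min = gr.Slider(0.0, 30.0, value=0.0, label="Minimum Scale", visible=False)

    def on_mode_change(m):
        # Scheduler value only for modes that take one; minimum scale for anything non-constant.
        return vis_change(m in MODES_WITH_VALUE), vis_change(m != "Constant")

    mode.change(on_mode_change, inputs=[mode], outputs=[sched_val, scale_min])

demo.launch()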
