-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsdxl.py
55 lines (46 loc) · 1.34 KB
/
sdxl.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import torch
from diffusers import DiffusionPipeline
import platform
def torch_device():
    """Pick the best available torch device: CUDA, then Apple MPS, then CPU."""
    mps_usable = torch.backends.mps.is_available() and torch.backends.mps.is_built()
    if torch.cuda.is_available():
        return "cuda"
    return "mps" if mps_usable else "cpu"
def pipeline(
    model="stabilityai/stable-diffusion-xl-base-1.0",
    device=torch_device(),
    watermark=False,
    low_vram=False,
):
    """Build a Stable Diffusion XL pipeline configured for the given device.

    Args:
        model: Hugging Face model id to load.
        device: Target device string ("cuda", "mps", or "cpu"). Defaults to
            the best device detected at import time (evaluated once, at def
            time — pass explicitly to override).
        watermark: If False, replace the pipeline watermarker with a no-op.
        low_vram: Enable VAE tiling/slicing (and, on CUDA, model CPU
            offloading) to reduce peak memory use.

    Returns:
        A configured diffusers DiffusionPipeline.
    """
    torch_dtype = torch.float16
    variant = "fp16"
    # MPS (macOS) needs fp32 here. BUG FIX: there is no "fp32" weight variant
    # on the Hub — fp32 is the default checkpoint, selected with variant=None;
    # passing variant="fp32" would make from_pretrained fail.
    if device == "mps":
        torch_dtype = torch.float32
        variant = None
    pipe = DiffusionPipeline.from_pretrained(
        model,
        torch_dtype=torch_dtype,
        use_safetensors=True,
        variant=variant,
    )
    # Enable VAE tiling and slicing to cut peak VRAM during decode.
    if low_vram:
        pipe.enable_vae_tiling()
        pipe.enable_vae_slicing()
    # CPU offload moves modules to the GPU on demand; otherwise move the
    # whole pipeline to the chosen device.
    if low_vram and device == "cuda":
        pipe.enable_model_cpu_offload()
    else:
        pipe.to(device)
    # Apply torch.compile only if OS is not Windows (unsupported there).
    if platform.system() != "Windows":
        # BUG FIX: original assigned the compiled module to a new attribute
        # "pipe.unit" (typo), so the uncompiled UNet kept being used.
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    # Mock out the watermarker unless watermarking was requested.
    if not watermark:
        pipe.watermark = NoWatermark()
    return pipe
class NoWatermark:
    """Drop-in replacement for the SDXL watermarker that leaves images untouched."""

    def apply_watermark(self, img):
        # Pass the image through unchanged.
        return img