Skip to content

Commit 53b6a66

Browse files
authored
Update GPTQ_Loader.py
Correcting decoder layer for renamed class.
1 parent ee164d1 commit 53b6a66

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

modules/GPTQ_loader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def load_quantized(model_name):
61 61
max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
62 62
max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
63 63

64    -    device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
   64 +    device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
65 65
model = accelerate.dispatch_model(model, device_map=device_map)
66 66

67 67
# Single GPU

0 commit comments

Comments (0)