Merge pull request oobabooga#393 from WojtekKowaluk/mps_support
Fix for MPS support on Apple Silicon
oobabooga authored Mar 18, 2023
2 parents dc35861 + e26763a commit bcd8afd
Showing 3 changed files with 12 additions and 2 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*
9 changes: 7 additions & 2 deletions modules/models.py
@@ -47,7 +47,12 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
+            if torch.has_mps:
+                device = torch.device('mps')
+                model = model.to(device)
+            else:
+                model = model.cuda()
 
     # FlexGen
     elif shared.args.flexgen:
@@ -97,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
            print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
            shared.args.cpu = True

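Note on the pattern above: the diff tries MPS first and falls back to .cuda() inline at each call site. Below is a minimal standalone sketch of the same device-selection logic — not part of this PR. It uses torch.backends.mps.is_available(), which newer PyTorch releases recommend over the torch.has_mps attribute used here, and adds an explicit CPU fallback so it runs anywhere:

import torch

def pick_device() -> torch.device:
    # Prefer Apple's Metal (MPS) backend, then CUDA, then CPU.
    if torch.backends.mps.is_available():
        return torch.device('mps')
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')

# Usage: move a loaded model (or any tensor) once, instead of
# branching at every call site as the diff does.
print(torch.ones(2, 2).to(pick_device()).device)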
3 changes: 3 additions & 0 deletions modules/text_generation.py
@@ -33,6 +33,9 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()

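The encode() change mirrors the model-loading logic: input token tensors are routed to MPS when available rather than unconditionally calling .cuda(). A self-contained sketch of that routing — with the CPU/FlexGen/DeepSpeed branches omitted and a CPU fallback added so the example runs on any machine, again preferring the torch.backends.mps.is_available() check:

import torch

def route_input_ids(input_ids: torch.Tensor) -> torch.Tensor:
    # Same priority as the patched encode(): MPS before CUDA.
    if torch.backends.mps.is_available():
        return input_ids.to(torch.device('mps'))
    if torch.cuda.is_available():
        return input_ids.cuda()
    return input_ids  # CPU fallback (encode() handles this via shared.args.cpu)

ids = route_input_ids(torch.tensor([[1, 2, 3]]))
print(ids.device)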
