Commit e26763a: Minor changes
oobabooga committed Mar 18, 2023
Parent: 7994b58

Showing 3 changed files with 4 additions and 10 deletions.
.gitignore: 5 changes (2 additions, 3 deletions)

@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*
@@ -19,6 +21,3 @@ img_me*
 !models/place-your-models-here.txt
 !softprompts/place-your-softprompts-here.txt
 !torch-dumps/place-your-pt-models-here.txt
-
-venv/
-.venv/
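Taken together, the two hunks move the venv/ and .venv/ patterns from the bottom of the file into the block of ignore rules near the top. Order only matters in a .gitignore when a later ! negation re-includes paths that an earlier pattern excluded; since the venv patterns do not overlap the !models/, !softprompts/, and !torch-dumps/ re-includes, the move leaves the effective ignore set unchanged.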
modules/models.py: 8 changes (2 additions, 6 deletions)

@@ -47,17 +47,13 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(
-                Path(f"models/{shared.model_name}"),
-                low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
-            )
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
             if torch.has_mps:
                 device = torch.device('mps')
                 model = model.to(device)
             else:
                 model = model.cuda()
 
-
     # FlexGen
     elif shared.args.flexgen:
         # Initialize environment
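The collapsed from_pretrained call is behavior-preserving: load in half precision (bfloat16 when --bf16 is set, float16 otherwise) with low_cpu_mem_usage, then place the model on Apple's MPS backend if present, else on CUDA. A minimal standalone sketch of that logic, assuming transformers is installed; load_half_precision, model_dir, and use_bf16 are hypothetical stand-ins for the repo's shared.* values:

    from pathlib import Path

    import torch
    from transformers import AutoModelForCausalLM

    def load_half_precision(model_dir, use_bf16=False):
        # bfloat16 when requested, float16 otherwise (mirrors the --bf16 flag).
        dtype = torch.bfloat16 if use_bf16 else torch.float16
        model = AutoModelForCausalLM.from_pretrained(
            Path(model_dir), low_cpu_mem_usage=True, torch_dtype=dtype
        )
        # Prefer Apple's Metal backend when available, otherwise assume CUDA.
        if torch.has_mps:
            model = model.to(torch.device('mps'))
        else:
            model = model.cuda()
        return model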
@@ -106,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
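The rewritten condition is equivalent to the old one: by De Morgan's law, not a and not b and not c is the same as not (a or b or c), which is exactly what not any((a, b, c)) computes. A throwaway exhaustive check (hypothetical script, not part of the commit):

    from itertools import product

    # Verify the refactor over all eight boolean combinations.
    for cpu, cuda, mps in product((False, True), repeat=3):
        old = not cpu and not cuda and not mps
        new = not any((cpu, cuda, mps))
        assert old == new
    print("equivalent for all 8 cases")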
modules/text_generation.py: 1 change (0 additions, 1 deletion)

@@ -39,7 +39,6 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     else:
         return input_ids.cuda()
 
-
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
