update sklearn version, torch compile env vars, don't worry about failure on preprocess load model (axolotl-ai-cloud#1821)

* update sklearn version, torch compile env vars, don't worry about failure on preprocess load model

* There is already a condition check within the function. This outer one is not necessary

Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>

---------

Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
winglian and NanoCode012 authored Aug 16, 2024
1 parent 68a3c76 commit 803fed3
Showing 3 changed files with 19 additions and 2 deletions.
2 changes: 1 addition & 1 deletion requirements.txt
@@ -25,7 +25,7 @@ numpy>=1.24.4
 # qlora things
 evaluate==0.4.1
 scipy
-scikit-learn==1.2.2
+scikit-learn==1.4.2
 pynvml
 art
 fschat @ git+https://github.com/lm-sys/FastChat.git@27a05b04a35510afb1d767ae7e5990cbd278f8fe
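
A quick sanity check (not part of this commit) to confirm an environment actually resolved the new scikit-learn pin:

    # Not from the commit: verify the installed scikit-learn matches
    # the updated pin in requirements.txt.
    import sklearn

    assert sklearn.__version__ == "1.4.2", f"unexpected scikit-learn {sklearn.__version__}"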
9 changes: 8 additions & 1 deletion src/axolotl/cli/preprocess.py
@@ -82,7 +82,14 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
         # "copying from a non-meta parameter in the checkpoint to a meta parameter in the current model"
         warnings.simplefilter("ignore")
         with init_empty_weights(include_buffers=True):
-            AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+            # fmt: off
+            try:
+                AutoModelForCausalLM.from_pretrained(
+                    model_name, trust_remote_code=True
+                )
+            except Exception as exc:  # pylint: disable=broad-exception-caught,unused-variable  # nosec B110 # noqa F841
+                pass
+            # fmt: on
 
     LOG.info(
         Fore.GREEN
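
The try/except above means preprocessing no longer aborts when the model cannot be instantiated: under init_empty_weights the model is built on the meta device only to prefetch files and validate the config, so a failure there (some trust_remote_code models cannot be constructed without real weights) is safe to ignore. A standalone sketch of the same pattern, with an illustrative helper name that is not in the commit:

    # Sketch of the pattern above; try_prefetch_model is an illustrative
    # name, not from this commit. init_empty_weights builds the model on
    # the meta device so no weights are ever materialized.
    from accelerate import init_empty_weights
    from transformers import AutoModelForCausalLM

    def try_prefetch_model(model_name: str) -> None:
        try:
            with init_empty_weights(include_buffers=True):
                AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
        except Exception:  # tolerate models that cannot build on the meta device
            pass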
10 changes: 10 additions & 0 deletions src/axolotl/utils/trainer.py
@@ -390,6 +390,14 @@ def calc_sample_packing_eff_est(estimates: List[float]):
     return total_num_steps
 
 
+def setup_torch_compile_env(cfg):
+    if cfg.torch_compile:
+        if not cfg.torch_compile_backend:
+            os.environ["ACCELERATE_DYNAMO_BACKEND"] = "INDUCTOR"
+        else:
+            os.environ["ACCELERATE_DYNAMO_BACKEND"] = cfg.torch_compile_backend.upper()
+
+
 def setup_deepspeed_env(cfg, stage=None):
     os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
     os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed
@@ -434,6 +442,8 @@ def prepare_optim_env(cfg):
         stage = deepspeed_config.get("zero_optimization", {}).get("stage", None)
         setup_deepspeed_env(cfg, stage=stage)
 
+    setup_torch_compile_env(cfg)
+
     if (cfg.bf16 == "auto" and is_torch_bf16_gpu_available()) or cfg.bf16 is True:
         os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16"
     elif cfg.fp16:
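
The new setup_torch_compile_env maps axolotl's torch_compile settings onto the ACCELERATE_DYNAMO_BACKEND variable that Hugging Face Accelerate reads, defaulting to INDUCTOR when no backend is configured. A minimal sketch of the resulting behavior, using SimpleNamespace as a stand-in for axolotl's cfg object:

    # SimpleNamespace stands in for axolotl's cfg; the function body
    # mirrors setup_torch_compile_env from this commit.
    import os
    from types import SimpleNamespace

    def setup_torch_compile_env(cfg):
        if cfg.torch_compile:
            if not cfg.torch_compile_backend:
                os.environ["ACCELERATE_DYNAMO_BACKEND"] = "INDUCTOR"
            else:
                os.environ["ACCELERATE_DYNAMO_BACKEND"] = cfg.torch_compile_backend.upper()

    setup_torch_compile_env(SimpleNamespace(torch_compile=True, torch_compile_backend=None))
    print(os.environ["ACCELERATE_DYNAMO_BACKEND"])  # INDUCTOR

    setup_torch_compile_env(SimpleNamespace(torch_compile=True, torch_compile_backend="eager"))
    print(os.environ["ACCELERATE_DYNAMO_BACKEND"])  # EAGER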