From 803fed3e904a527ce302fc818aef3500e07722c9 Mon Sep 17 00:00:00 2001 From: Wing Lian Date: Fri, 16 Aug 2024 10:41:51 -0400 Subject: [PATCH] update sklearn version, torch compile env vars, don't worry about failure on preprocess load model (#1821) * update sklearn version, torch compile env vars, don't worry about failure on preprocess load model * There is already a condition check within the function. This outer one is not necessary Co-authored-by: NanoCode012 --------- Co-authored-by: NanoCode012 --- requirements.txt | 2 +- src/axolotl/cli/preprocess.py | 9 ++++++++- src/axolotl/utils/trainer.py | 10 ++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index f32af373b..dc74b916f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,7 +25,7 @@ numpy>=1.24.4 # qlora things evaluate==0.4.1 scipy -scikit-learn==1.2.2 +scikit-learn==1.4.2 pynvml art fschat @ git+https://github.com/lm-sys/FastChat.git@27a05b04a35510afb1d767ae7e5990cbd278f8fe diff --git a/src/axolotl/cli/preprocess.py b/src/axolotl/cli/preprocess.py index e0dd7c2dc..e12462c00 100644 --- a/src/axolotl/cli/preprocess.py +++ b/src/axolotl/cli/preprocess.py @@ -82,7 +82,14 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs): # "copying from a non-meta parameter in the checkpoint to a meta parameter in the current model" warnings.simplefilter("ignore") with init_empty_weights(include_buffers=True): - AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True) + # fmt: off + try: + AutoModelForCausalLM.from_pretrained( + model_name, trust_remote_code=True + ) + except Exception as exc: # pylint: disable=broad-exception-caught,unused-variable # nosec B110 # noqa F841 + pass + # fmt: on LOG.info( Fore.GREEN diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py index 02234d8b7..26796f2e5 100644 --- a/src/axolotl/utils/trainer.py +++ b/src/axolotl/utils/trainer.py @@ -390,6 +390,14 @@ def 
calc_sample_packing_eff_est(estimates: List[float]): return total_num_steps +def setup_torch_compile_env(cfg): + if cfg.torch_compile: + if not cfg.torch_compile_backend: + os.environ["ACCELERATE_DYNAMO_BACKEND"] = "INDUCTOR" + else: + os.environ["ACCELERATE_DYNAMO_BACKEND"] = cfg.torch_compile_backend.upper() + + def setup_deepspeed_env(cfg, stage=None): os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed @@ -434,6 +442,8 @@ def prepare_optim_env(cfg): stage = deepspeed_config.get("zero_optimization", {}).get("stage", None) setup_deepspeed_env(cfg, stage=stage) + setup_torch_compile_env(cfg) + if (cfg.bf16 == "auto" and is_torch_bf16_gpu_available()) or cfg.bf16 is True: os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16" elif cfg.fp16: