Commit c35ba4f

refactor(peft): check minimum version

dacorvo committed Jan 27, 2025
1 parent 654f5ea
Showing 8 changed files with 22 additions and 8 deletions.
optimum/neuron/accelerate/accelerator.py (1 addition, 1 deletion)

@@ -33,7 +33,6 @@
 from torch.utils.data import DataLoader
 from torch.utils.data.distributed import DistributedSampler
 from transformers import PreTrainedModel
-from transformers.utils import is_peft_available
 
 from ...utils import logging
 from ..distributed import Parallelizer, ParallelizersManager
@@ -47,6 +46,7 @@
     patch_within_function,
     replace_class_in_inheritance_hierarchy,
 )
+from ..utils.import_utils import is_peft_available
 from ..utils.misc import args_and_kwargs_to_kwargs_only, is_main_worker
 from ..utils.model_utils import get_tied_parameters_dict, tie_parameters
 from ..utils.require_utils import requires_neuronx_distributed, requires_torch_xla
optimum/neuron/distributed/checkpointing.py (1 addition, 1 deletion)

@@ -26,9 +26,9 @@
     SAFE_WEIGHTS_NAME,
     WEIGHTS_INDEX_NAME,
     WEIGHTS_NAME,
-    is_peft_available,
 )
 
+from ..utils.import_utils import is_peft_available
 from ..utils.peft_utils import ADAPTER_MODEL_PARALLEL_SHARDS_DIR_NAME
 from ..utils.require_utils import requires_neuronx_distributed, requires_safetensors, requires_torch_xla
 from .utils import MODEL_PARALLEL_SHARDS_DIR_NAME, ParameterMetadata, compute_query_indices_for_rank
optimum/neuron/distributed/parallel_layers.py (1 addition, 1 deletion)

@@ -23,10 +23,10 @@
 
 import torch
 from torch.nn.modules.loss import _WeightedLoss
-from transformers.utils import is_peft_available
 
 from ...utils import NormalizedConfigManager, logging
 from ..utils import patch_everywhere, patch_within_function
+from ..utils.import_utils import is_peft_available
 from ..utils.misc import is_main_worker
 from ..utils.require_utils import requires_neuronx_distributed
 from .utils import (
optimum/neuron/distributed/utils.py (1 addition, 2 deletions)

@@ -27,12 +27,11 @@
 
 import torch
 from transformers import PretrainedConfig
-from transformers.utils import is_peft_available
 from transformers.utils.fx import HFTracer
 
 from ...utils import logging
 from ..utils import DynamicPatch, Patcher
-from ..utils.import_utils import is_neuronx_distributed_available
+from ..utils.import_utils import is_neuronx_distributed_available, is_peft_available
 from ..utils.misc import download_checkpoints_in_cache, is_precompilation
 from ..utils.peft_utils import NeuronPeftModel
 from ..utils.require_utils import requires_neuronx_distributed, requires_peft, requires_safetensors, requires_torch_xla
optimum/neuron/trainers.py (1 addition, 1 deletion)

@@ -80,7 +80,6 @@
     WEIGHTS_NAME,
     is_accelerate_available,
     is_apex_available,
-    is_peft_available,
     is_sagemaker_mp_enabled,
 )
 
@@ -102,6 +101,7 @@
     has_write_access_to_repo,
 )
 from .utils.hub_cache_utils import ModelCacheEntry, hub_neuronx_cache, patch_neuron_cc_wrapper, synchronize_hub_cache
+from .utils.import_utils import is_peft_available
 from .utils.misc import is_main_worker, is_precompilation
 from .utils.peft_utils import NeuronPeftModel, get_peft_model
 from .utils.require_utils import requires_neuronx_distributed, requires_torch_neuronx
optimum/neuron/utils/import_utils.py (14 additions, 0 deletions)

@@ -21,6 +21,7 @@
 
 
 MIN_ACCELERATE_VERSION = "0.20.1"
+MIN_PEFT_VERSION = "0.14.0"
 
 
 def is_neuron_available() -> bool:
@@ -80,3 +81,16 @@ def is_trl_available(required_version: Optional[str] = None) -> bool:
 
         raise RuntimeError(f"Only `trl=={required_version}` is supported, but {trl.__version__} is installed.")
     return False
+
+
+def is_peft_available(min_version: Optional[str] = MIN_PEFT_VERSION) -> bool:
+    _peft_available = importlib.util.find_spec("peft") is not None
+    if min_version is not None:
+        if _peft_available:
+            import peft
+
+            _peft_version = peft.__version__
+            return version.parse(_peft_version) >= version.parse(min_version)
+        else:
+            return False
+    return _peft_available
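
For context, here is a minimal usage sketch (not part of the commit) showing how the version-aware check behaves; the outdated version in the comment is only an example:

    from optimum.neuron.utils.import_utils import is_peft_available

    # With the default minimum version, an installed but outdated peft
    # (e.g. 0.13.x) is reported as unavailable:
    if is_peft_available():  # same as is_peft_available(min_version="0.14.0")
        from peft import PeftModel  # safe: a recent enough peft is importable here

    # Passing min_version=None degrades to a pure presence check,
    # regardless of the installed version:
    peft_installed = is_peft_available(min_version=None)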
optimum/neuron/utils/peft_utils.py (1 addition, 1 deletion)

@@ -22,8 +22,8 @@
 from typing import Any, List, Optional, Tuple, Union
 
 import torch
-from transformers.utils import is_peft_available
 
+from .import_utils import is_peft_available
 from .patching import Patcher, replace_class_in_inheritance_hierarchy
 from .require_utils import requires_neuronx_distributed, requires_safetensors
 from .training_utils import _get_model_param_count
optimum/neuron/utils/require_utils.py (2 additions, 1 deletion)

@@ -17,10 +17,11 @@
 import functools
 from typing import Any, Callable, Dict
 
-from transformers.utils import is_peft_available, is_safetensors_available
+from transformers.utils import is_safetensors_available
 
 from .import_utils import (
     is_neuronx_distributed_available,
+    is_peft_available,
     is_torch_neuronx_available,
     is_torch_xla_available,
     is_transformers_neuronx_available,
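
With this change, every `is_peft_available` call in the package goes through the version-aware helper above, so decorators such as `requires_peft` also enforce the minimum version. The decorator body itself is not shown in this diff; the following is only a hedged sketch of how it could be built on top of `is_peft_available`, with the error message invented for illustration:

    import functools
    from typing import Any, Callable

    from .import_utils import is_peft_available


    def requires_peft(func: Callable[..., Any]) -> Callable[..., Any]:
        # Reject calls when peft is missing or older than MIN_PEFT_VERSION,
        # which is_peft_available() now enforces by default.
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            if not is_peft_available():
                raise ModuleNotFoundError(
                    f"`{func.__name__}` requires `peft >= 0.14.0`. "
                    "Install or upgrade it with `pip install -U peft`."
                )
            return func(*args, **kwargs)

        return wrapper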
