change version (huggingface#25387)
SunMarc authored and EduardoPach committed Aug 9, 2023
1 parent 36098be commit b8cd6b1
Showing 2 changed files with 3 additions and 3 deletions.
src/transformers/modeling_utils.py (4 changes: 2 additions & 2 deletions)

@@ -2786,10 +2786,10 @@ def from_pretrained(
             model = replace_with_bnb_linear(
                 model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config
             )
-            # training in 8-bit is only available in 0.37.0+
+            # training in 8-bit is only available in 0.37.0+ but a major bug in 8-bit optimizers was fixed in 0.41.1
             model._is_quantized_training_enabled = version.parse(
                 importlib.metadata.version("bitsandbytes")
-            ) >= version.parse("0.37.0")
+            ) >= version.parse("0.41.1")
 
             model.config.quantization_config = quantization_config
             model.is_8bit_serializable = is_8bit_serializable
src/transformers/trainer.py (2 changes: 1 addition & 1 deletion)

@@ -404,7 +404,7 @@ def __init__(
         else:
             raise ValueError(
                 "The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit"
-                " model, please make sure that you have installed `bitsandbytes>=0.37.0`. "
+                " model, please make sure that you have installed `bitsandbytes>=0.41.1`. "
             )
 
         # Setup Sharded DDP training
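
For context, both hunks implement the same check: 8-bit training is only enabled when the installed bitsandbytes release is at least 0.41.1, the version that fixed a major bug in the 8-bit optimizers. Below is a minimal standalone sketch of that version gate, assuming the `bitsandbytes` and `packaging` packages are installed; `is_8bit_training_supported` is an illustrative helper name, not part of the transformers API.

    # Sketch of the version gate applied in this commit (illustrative,
    # not the actual transformers implementation).
    import importlib.metadata

    from packaging import version

    # bitsandbytes 0.41.1 fixed a major bug in the 8-bit optimizers.
    MIN_BNB_VERSION = version.parse("0.41.1")

    def is_8bit_training_supported() -> bool:
        """True if the installed bitsandbytes can be used for 8-bit training."""
        try:
            installed = version.parse(importlib.metadata.version("bitsandbytes"))
        except importlib.metadata.PackageNotFoundError:
            return False
        return installed >= MIN_BNB_VERSION

    if __name__ == "__main__":
        print("8-bit training supported:", is_8bit_training_supported())

Raising the floor from 0.37.0 to 0.41.1 means the Trainer now refuses to fine-tune an 8-bit model on older bitsandbytes releases (via the ValueError above) instead of training with the buggy optimizers.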
