[CustomDevice] fix fused rope usage of llama on npu (#8052)
SylarTiaNII authored Mar 14, 2024
1 parent d7b5939 commit bde3317
Showing 1 changed file with 11 additions and 1 deletion.
paddlenlp/transformers/llama/modeling.py (11 additions, 1 deletion)
@@ -84,6 +84,16 @@
 ]
 
 
+def is_fused_rope_valid():
+    current_device = get_env_device()
+    if current_device == "gpu":
+        return fused_rotary_position_embedding is not None
+    elif current_device == "npu":
+        return True
+    else:
+        return False
+
+
 def _get_interleave(n):
     def _get_interleave_power_of_2(n):
         start = 2 ** (-(2 ** -(math.log2(n) - 3)))
@@ -655,7 +665,7 @@ def __init__(self, config: LlamaConfig, layerwise_recompute: bool = False):
 
         self.use_fused_rope = config.use_fused_rope
         if self.use_fused_rope:
-            if "gpu" not in paddle.device.get_device() or fused_rotary_position_embedding is None:
+            if not is_fused_rope_valid():
                 warnings.warn(
                     "Enable fuse rope in the config, but fuse rope is not available. "
                     "Will disable fuse rope. Try using latest gpu version of Paddle."
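For context, the check this commit introduces follows a common gate-and-fallback pattern: probe the device type, enable the fused kernel only where it is known to exist, and otherwise warn and fall back to the unfused path. The sketch below is a minimal, self-contained illustration of that pattern, not the PaddleNLP source itself; get_env_device and fused_rotary_position_embedding are stand-ins for the real Paddle/PaddleNLP symbols and are stubbed here so the example runs on its own.

import warnings

# Stand-ins for the real symbols (assumptions for illustration only):
# in PaddleNLP, fused_rotary_position_embedding comes from Paddle's
# incubate ops and is None when the op is missing, while get_env_device
# reports the active device type.
fused_rotary_position_embedding = None


def get_env_device():
    return "npu"  # pretend we are running on an NPU


def is_fused_rope_valid():
    current_device = get_env_device()
    if current_device == "gpu":
        # On GPU the fused op only exists in new enough Paddle builds.
        return fused_rotary_position_embedding is not None
    elif current_device == "npu":
        # On NPU the custom-device kernel is taken to be available.
        return True
    else:
        return False


use_fused_rope = True  # e.g. config.use_fused_rope
if use_fused_rope and not is_fused_rope_valid():
    warnings.warn(
        "Enable fuse rope in the config, but fuse rope is not available. "
        "Will disable fuse rope. Try using latest gpu version of Paddle."
    )
    use_fused_rope = False

print(use_fused_rope)  # True here, since the NPU branch reports available

The design point of the commit is visible in the sketch: the old condition hard-coded "gpu", so NPU runs always hit the fallback even when a fused rope kernel existed; routing the decision through a per-device helper lets each backend declare availability on its own terms.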
