
Commit 4d21e1e

pre-commit
1 parent 010fa99 commit 4d21e1e

File tree

1 file changed (+7, -2)


vllm/attention/layer.py

Lines changed: 7 additions & 2 deletions
@@ -99,14 +99,19 @@ def check_upstream_fa_availability(dtype: torch.dtype):
 
 
 def maybe_get_vit_flash_attn_backend(
-    attn_backend: _Backend, use_upstream_fa: bool,
+    attn_backend: _Backend,
+    use_upstream_fa: bool,
     attn_backend_override: _Backend | None = None,
 ) -> tuple[_Backend, Callable | None]:
     if current_platform.is_rocm():
         if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA and on_gfx9():
             attn_backend = _Backend.ROCM_AITER_FA
 
-        elif check_upstream_fa_availability(torch.get_default_dtype()) and on_gfx9() and attn_backend_override is None:
+        elif (
+            check_upstream_fa_availability(torch.get_default_dtype())
+            and on_gfx9()
+            and attn_backend_override is None
+        ):
             attn_backend = _Backend.FLASH_ATTN
             use_upstream_fa = True
         else:
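
For context, the rewrapped elif is part of the ViT attention backend selection on ROCm: prefer the AITER multi-head-attention kernel on gfx9 when enabled, otherwise fall back to upstream flash-attn unless the caller passed an explicit backend override. Below is a minimal, runnable sketch of that ROCm-path logic; the enum members and keyword flags are simplified stand-ins for vLLM internals (envs.VLLM_ROCM_USE_AITER / VLLM_ROCM_USE_AITER_MHA, on_gfx9(), check_upstream_fa_availability()), not the actual vLLM API.

    from enum import Enum, auto


    class _Backend(Enum):
        # Simplified stand-in for vLLM's _Backend enum.
        ROCM_AITER_FA = auto()
        FLASH_ATTN = auto()
        TORCH_SDPA = auto()


    def select_vit_backend_rocm(
        attn_backend: _Backend,
        use_upstream_fa: bool,
        *,
        aiter_mha_enabled: bool,      # stands in for the VLLM_ROCM_USE_AITER* env flags
        is_gfx9: bool,                # stands in for on_gfx9()
        upstream_fa_available: bool,  # stands in for check_upstream_fa_availability(...)
        attn_backend_override: _Backend | None = None,
    ) -> tuple[_Backend, bool]:
        # Prefer the AITER MHA kernel on gfx9 when the env flags enable it.
        if aiter_mha_enabled and is_gfx9:
            attn_backend = _Backend.ROCM_AITER_FA
        # Otherwise fall back to upstream flash-attn, but only when the
        # caller did not request a specific backend override.
        elif (
            upstream_fa_available
            and is_gfx9
            and attn_backend_override is None
        ):
            attn_backend = _Backend.FLASH_ATTN
            use_upstream_fa = True
        return attn_backend, use_upstream_fa


    # Example: AITER disabled, upstream FA available, no override
    # -> falls through to upstream flash-attn.
    backend, upstream = select_vit_backend_rocm(
        _Backend.TORCH_SDPA,
        False,
        aiter_mha_enabled=False,
        is_gfx9=True,
        upstream_fa_available=True,
    )
    assert backend is _Backend.FLASH_ATTN and upstream is True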
