@@ -77,7 +77,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # can be useful for both `dev` and `test`
 # explicitly set the list to avoid issues with torch 2.2
 # see https://github.com/pytorch/pytorch/pull/123243
-ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0+PTX '
+ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0 12.0 '
 ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
 # Override the arch list for flash-attn to reduce the binary size
 ARG vllm_fa_cmake_gpu_arches='80-real;90-real'
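Note (not part of the diff): `torch_cuda_arch_list` is a build `ARG`, so the list above is only the default; compute capability 12.0 corresponds to Blackwell consumer GPUs (e.g. RTX 50-series). A minimal sketch of overriding the list at build time and checking what a local GPU reports, assuming a standard `docker build` of this Dockerfile (the `docker/Dockerfile` path is an assumption) and a working PyTorch install on the host:

# Hypothetical build invocation; trims the arch list to a single target capability
docker build -f docker/Dockerfile \
  --build-arg torch_cuda_arch_list='12.0' \
  -t vllm-local .

# Print the compute capability of the local GPU, e.g. (12, 0) on an RTX 50-series card
python3 -c "import torch; print(torch.cuda.get_device_capability())"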
@@ -244,7 +244,7 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist
 
 # If we need to build FlashInfer wheel before its release:
 # $ # Note we remove 7.0 from the arch list compared to the list below, since FlashInfer only supports sm75+
-# $ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a'
+# $ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0 '
 # $ git clone https://github.com/flashinfer-ai/flashinfer.git --recursive
 # $ cd flashinfer
 # $ git checkout v0.2.6.post1
@@ -261,7 +261,7 @@ if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
 if [[ "$CUDA_VERSION" == 12.8* ]]; then \
 uv pip install --system https://download.pytorch.org/whl/cu128/flashinfer/flashinfer_python-0.2.6.post1%2Bcu128torch2.7-cp39-abi3-linux_x86_64.whl; \
 else \
-export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a' && \
+export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0 ' && \
 git clone https://github.com/flashinfer-ai/flashinfer.git --single-branch --branch v0.2.6.post1 --recursive && \
 # Needed to build AOT kernels
 (cd flashinfer && \
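Note (not part of the diff): after the image is built, the architecture list that the PyTorch and FlashInfer builds actually picked up can be sanity-checked from inside the container. A sketch, assuming a hypothetical image tag `vllm-local` and that the image's default entrypoint needs to be overridden:

# List the CUDA architectures the installed torch build was compiled for (expect sm_120 in the list)
docker run --rm --gpus all --entrypoint python3 vllm-local \
  -c "import torch; print(torch.cuda.get_arch_list())"

# Confirm flashinfer imports cleanly and report its version
docker run --rm --gpus all --entrypoint python3 vllm-local \
  -c "import flashinfer; print(flashinfer.__version__)"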