2 changes: 1 addition & 1 deletion .github/scripts/filter-matrix.py
@@ -8,7 +8,7 @@

# currently we don't support python 3.13t due to tensorrt does not support 3.13t
disabled_python_versions: List[str] = ["3.13t", "3.14", "3.14t"]
-disabled_cuda_versions: List[str] = ["cu130"]
+disabled_cuda_versions: List[str] = []

# jetpack 6.2 only officially supports python 3.10 and cu126
jetpack_python_versions: List[str] = ["3.10"]
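Note: a minimal sketch of how a disabled-version list like the one above is typically applied when filtering the generated build matrix; the `filter_matrix` helper and the entry keys (`python_version`, `desired_cuda`) are illustrative assumptions, not the actual contents of filter-matrix.py:

```python
from typing import Dict, List

disabled_python_versions: List[str] = ["3.13t", "3.14", "3.14t"]
disabled_cuda_versions: List[str] = []  # empty now that cu130 builds are enabled


def filter_matrix(entries: List[Dict[str, str]]) -> List[Dict[str, str]]:
    # Drop every build-matrix entry whose Python or CUDA version is disabled.
    return [
        e
        for e in entries
        if e["python_version"] not in disabled_python_versions
        and e["desired_cuda"] not in disabled_cuda_versions
    ]


matrix = [
    {"python_version": "3.12", "desired_cuda": "cu126"},
    {"python_version": "3.12", "desired_cuda": "cu130"},
    {"python_version": "3.13t", "desired_cuda": "cu130"},
]
print(filter_matrix(matrix))  # the 3.13t entry is dropped; both cu130 entries survive
```

With this change, clearing `disabled_cuda_versions` is what lets cu130 jobs through the filter.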
6 changes: 3 additions & 3 deletions .github/scripts/generate-release-matrix.py
@@ -5,8 +5,8 @@
import sys

RELEASE_CUDA_VERSION = {
"wheel": ["cu129"],
"tarball": ["cu129"],
"wheel": ["cu130"],
"tarball": ["cu130"],
}
RELEASE_PYTHON_VERSION = {
"wheel": ["3.10", "3.11", "3.12", "3.13"],
@@ -15,7 +15,7 @@
sbsa_container_image: str = "quay.io/pypa/manylinux_2_34_aarch64"

CXX11_TARBALL_CONTAINER_IMAGE = {
"cu129": "pytorch/libtorch-cxx11-builder:cuda12.9-main",
"cu130": "pytorch/libtorch-cxx11-builder:cuda13.0-main",
}


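Note: bumps like this one are easy to get out of sync, e.g. releasing cu130 wheels and tarballs while the libtorch builder image is still keyed to cu129. A hedged sketch of the kind of consistency check that guards against that; the assert is an illustration, not part of generate-release-matrix.py:

```python
from typing import Dict, List

RELEASE_CUDA_VERSION: Dict[str, List[str]] = {
    "wheel": ["cu130"],
    "tarball": ["cu130"],
}
CXX11_TARBALL_CONTAINER_IMAGE: Dict[str, str] = {
    "cu130": "pytorch/libtorch-cxx11-builder:cuda13.0-main",
}

# Every CUDA version released as a tarball needs a matching libtorch CXX11 builder image.
missing = [
    cu for cu in RELEASE_CUDA_VERSION["tarball"] if cu not in CXX11_TARBALL_CONTAINER_IMAGE
]
assert not missing, f"no CXX11 tarball container image configured for: {missing}"
```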
10 changes: 5 additions & 5 deletions .github/scripts/generate-tensorrt-test-matrix.py
@@ -11,18 +11,18 @@
# channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch
# channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....)
CUDA_VERSIONS_DICT = {
"nightly": ["cu129"],
"test": ["cu126", "cu128", "cu129"],
"release": ["cu126", "cu128", "cu129"],
"nightly": ["cu130"],
"test": ["cu126", "cu128", "cu130"],
"release": ["cu126", "cu128", "cu130"],
}

# please update the python version you want to test with the future tensorRT version here
# channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch
# channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....)
PYTHON_VERSIONS_DICT = {
"nightly": ["3.11"],
"test": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"release": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"test": ["3.10", "3.11", "3.12", "3.13"],
"release": ["3.10", "3.11", "3.12", "3.13"],
}

# please update the future tensorRT version you want to test here
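Note: the channel comments above reduce to a lookup — the branch that triggers the workflow selects a channel, and the channel selects which CUDA and Python versions are exercised against the future TensorRT build. A sketch under that assumption (the `collect_versions` helper is illustrative, not part of the script):

```python
from itertools import product
from typing import List, Tuple

CUDA_VERSIONS_DICT = {
    "nightly": ["cu130"],
    "test": ["cu126", "cu128", "cu130"],
    "release": ["cu126", "cu128", "cu130"],
}
PYTHON_VERSIONS_DICT = {
    "nightly": ["3.11"],
    "test": ["3.10", "3.11", "3.12", "3.13"],
    "release": ["3.10", "3.11", "3.12", "3.13"],
}


def collect_versions(channel: str) -> List[Tuple[str, str]]:
    # Every (CUDA, Python) pair tested for the given channel.
    return list(product(CUDA_VERSIONS_DICT[channel], PYTHON_VERSIONS_DICT[channel]))


print(collect_versions("nightly"))       # [('cu130', '3.11')]
print(len(collect_versions("release")))  # 3 CUDA versions x 4 Python versions = 12 pairs
```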
1 change: 1 addition & 0 deletions .github/workflows/build_windows.yml
@@ -241,6 +241,7 @@ jobs:
env:
ENV_SCRIPT: ${{ inputs.env-script }}
run: |
+set -x
source "${BUILD_ENV_FILE}"
if [[ -z "${ENV_SCRIPT}" ]]; then
${CONDA_RUN} python setup.py clean
12 changes: 6 additions & 6 deletions .github/workflows/docgen.yml
@@ -14,12 +14,12 @@ jobs:
if: ${{ ! contains(github.actor, 'pytorchbot') }}
environment: pytorchbot-env
container:
-image: docker.io/pytorch/manylinux2_28-builder:cuda12.9
+image: docker.io/pytorch/manylinux2_28-builder:cuda13.0
options: --gpus all
env:
-CUDA_HOME: /usr/local/cuda-12.9
-VERSION_SUFFIX: cu129
-CU_VERSION: cu129
+CUDA_HOME: /usr/local/cuda-13.0
+VERSION_SUFFIX: cu130
+CU_VERSION: cu130
CHANNEL: nightly
CI_BUILD: 1
steps:
@@ -35,14 +35,14 @@
- name: Install base deps
run: |
python3 -m pip install pip --upgrade
-python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu129
+python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu130
./packaging/pre_build_script.sh
- name: Get HEAD SHA
id: vars
run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: Build Python Package
run: |
-python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu129
+python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130
- name: Generate New Docs
run: |
cd docsrc
6 changes: 3 additions & 3 deletions MODULE.bazel
@@ -36,7 +36,7 @@ new_local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
path = "/usr/local/cuda-12.9/",
path = "/usr/local/cuda-13.0/",
)

# for Jetson
@@ -65,7 +65,7 @@ http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
urls = ["https://download.pytorch.org/libtorch/nightly/cu129/libtorch-shared-with-deps-latest.zip"],
urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-shared-with-deps-latest.zip"],
)

# in aarch64 platform you can get libtorch via either local or wheel file
@@ -83,7 +83,7 @@ http_archive(
name = "libtorch_win",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
urls = ["https://download.pytorch.org/libtorch/nightly/cu129/libtorch-win-shared-with-deps-latest.zip"],
urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-win-shared-with-deps-latest.zip"],
)

http_archive(
8 changes: 4 additions & 4 deletions README.md
@@ -5,9 +5,9 @@ Torch-TensorRT
<h4> Easily achieve the best inference performance for any PyTorch model on the NVIDIA platform. </h4>

[![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
-[![pytorch](https://img.shields.io/badge/PyTorch-2.8-green)](https://download.pytorch.org/whl/nightly/cu128)
-[![cuda](https://img.shields.io/badge/CUDA-12.8-green)](https://developer.nvidia.com/cuda-downloads)
-[![trt](https://img.shields.io/badge/TensorRT-10.13.2.6-green)](https://github.com/nvidia/tensorrt-llm)
+[![pytorch](https://img.shields.io/badge/PyTorch-2.9-green)](https://download.pytorch.org/whl/nightly/cu130)
+[![cuda](https://img.shields.io/badge/CUDA-13.0-green)](https://developer.nvidia.com/cuda-downloads)
+[![trt](https://img.shields.io/badge/TensorRT-10.12.0-green)](https://github.com/nvidia/tensorrt)
[![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
[![Linux x86-64 Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml)
[![Linux SBSA Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml)
@@ -27,7 +27,7 @@ pip install torch-tensorrt

Nightly versions of Torch-TensorRT are published on the PyTorch package index
```bash
-pip install --pre torch-tensorrt --index-url https://download.pytorch.org/whl/nightly/cu128
+pip install --pre torch-tensorrt --index-url https://download.pytorch.org/whl/nightly/cu130
```

Torch-TensorRT is also distributed in the ready-to-run [NVIDIA NGC PyTorch Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) which has all dependencies with the proper versions and example notebooks included.
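Note: after installing from the cu130 nightly index, a quick sanity check that the stack is consistent (version strings will vary by nightly date; this assumes torch, torch_tensorrt, and tensorrt are importable in the environment):

```python
import tensorrt
import torch
import torch_tensorrt

# A cu130 torch build should report a 13.x CUDA toolkit.
print("torch:", torch.__version__, "| CUDA:", torch.version.cuda)
print("torch_tensorrt:", torch_tensorrt.__version__)
print("tensorrt:", tensorrt.__version__)
assert torch.version.cuda and torch.version.cuda.startswith("13."), "expected a CUDA 13.x (cu130) torch build"
```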
4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -2,9 +2,9 @@

# Base image starts with CUDA
#TODO: cuda version
-ARG BASE_IMG=nvidia/cuda:12.9.0-devel-ubuntu22.04
+ARG BASE_IMG=nvidia/cuda:13.0.0-devel-ubuntu22.04
FROM ${BASE_IMG} as base
-ENV BASE_IMG=nvidia/cuda:12.9.0-devel-ubuntu22.04
+ENV BASE_IMG=nvidia/cuda:13.0.0-devel-ubuntu22.04

ARG TENSORRT_VERSION
ENV TENSORRT_VERSION=${TENSORRT_VERSION}
2 changes: 1 addition & 1 deletion docker/dist-build.sh
@@ -4,7 +4,7 @@ set -x

TOP_DIR=$(cd $(dirname $0); pwd)/..

BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu129 -w dist"
BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 -w dist"

# TensorRT restricts our pip version
cd ${TOP_DIR} \
12 changes: 6 additions & 6 deletions docsrc/getting_started/installation.rst
@@ -46,7 +46,7 @@ Torch-TensorRT distributed nightlies targeting the PyTorch nightly. These can be

.. code-block:: sh

-python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu128
+python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu130



@@ -131,7 +131,7 @@ Once the WORKSPACE has been configured properly, all that is required to build t

.. code-block:: sh

-python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128
+python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130


If you use the ``uv`` (`https://docs.astral.sh/uv/ <https://docs.astral.sh/uv/>`_) tool to manage python and your projects, the command is slightly simpler
@@ -146,7 +146,7 @@

.. code-block:: sh

-python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 -w dist
+python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 -w dist

Additional Build Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -164,7 +164,7 @@ which has implications for features like serialization.

.. code-block:: sh

-PYTHON_ONLY=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128
+PYTHON_ONLY=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130


No TorchScript Frontend
@@ -175,7 +175,7 @@ of C++ code that is no longer necessary for most users. Therefore you can exclud

.. code-block:: sh

-NO_TORCHSCRIPT=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128
+NO_TORCHSCRIPT=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130


Building the C++ Library Standalone (TorchScript Only)
@@ -245,7 +245,7 @@ Build steps

* Open the app "x64 Native Tools Command Prompt for VS 2022" - note that Admin privileges may be necessary
* Ensure Bazelisk (Bazel launcher) is installed on your machine and available from the command line. Package installers such as Chocolatey can be used to install Bazelisk
-* Install latest version of Torch (i.e. with ``pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu128``)
+* Install latest version of Torch (i.e. with ``pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu130``)
* Clone the Torch-TensorRT repository and navigate to its root directory
* Run ``pip install ninja wheel setuptools``
* Run ``pip install --pre -r py/requirements.txt``
5 changes: 3 additions & 2 deletions py/requirements.txt
@@ -1,8 +1,9 @@
numpy
packaging
pybind11==2.6.2
---extra-index-url https://download.pytorch.org/whl/nightly/cu129
+--extra-index-url https://download.pytorch.org/whl/nightly/cu130
torch>=2.9.0.dev,<2.10.0
--extra-index-url https://pypi.ngc.nvidia.com
pyyaml
-dllist
+dllist
+setuptools
9 changes: 7 additions & 2 deletions pyproject.toml
@@ -100,14 +100,19 @@ index-strategy = "unsafe-best-match"

[tool.uv.sources]
torch = [
{ index = "pytorch-nightly-cu129", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
{ index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
{ index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
]
torchvision = [
{ index = "pytorch-nightly-cu129", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
{ index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
{ index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
]

+[[tool.uv.index]]
+name = "pytorch-nightly-cu130"
+url = "https://download.pytorch.org/whl/nightly/cu130"
+explicit = false
+
[[tool.uv.index]]
name = "pytorch-nightly-cu129"
url = "https://download.pytorch.org/whl/nightly/cu129"
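Note: the `marker` strings above are standard PEP 508 environment markers; per host, they decide whether `torch` resolves from the new `pytorch-nightly-cu130` index or from `jetson-containers`. A sketch of how they evaluate using the `packaging` library (the sample `platform_release` values are made up):

```python
from packaging.markers import Marker

tegra_marker = Marker("platform_machine == 'aarch64' and 'tegra' in platform_release")

# A Jetson/Tegra host matches the jetson-containers index...
print(tegra_marker.evaluate({"platform_machine": "aarch64", "platform_release": "5.15.148-tegra"}))  # True
# ...while SBSA (non-Tegra aarch64) and x86_64 hosts fall through to pytorch-nightly-cu130.
print(tegra_marker.evaluate({"platform_machine": "aarch64", "platform_release": "6.8.0-generic"}))   # False
print(tegra_marker.evaluate({"platform_machine": "x86_64", "platform_release": "6.8.0-generic"}))    # False
```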
3 changes: 2 additions & 1 deletion tests/py/requirements.txt
@@ -3,6 +3,7 @@
expecttest==0.1.6
networkx==2.8.8
numpy
+setuptools
parameterized>=0.2.0
pytest>=8.2.1
pytest-xdist>=3.6.1
@@ -13,6 +14,6 @@ nvidia-modelopt[all]; python_version >'3.9' and python_version <'3.13'
# flashinfer-python is not supported for python version 3.13 or higher
# flashinfer-python is broken on python 3.9 at the moment, so skip it for now
flashinfer-python; python_version >'3.9' and python_version <'3.13'
---extra-index-url https://download.pytorch.org/whl/nightly/cu129
+--extra-index-url https://download.pytorch.org/whl/nightly/cu130
torchvision>=0.24.0.dev,<0.25.0
timm>=1.0.3
6 changes: 3 additions & 3 deletions tools/perf/Flux/create_env.sh
@@ -14,10 +14,10 @@ apt install bazel
bazel
cd /home/TensorRT

-python -m pip install --pre -e . --extra-index-url https://download.pytorch.org/whl/nightly/cu128
-pip install tensorrt==10.9.0.34 --force-reinstall
+python -m pip install --pre -e . --extra-index-url https://download.pytorch.org/whl/nightly/cu130
+pip install tensorrt==10.13.2.6 --force-reinstall

-pip3 install --pre torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128
+pip3 install --pre torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu130


pip install sentencepiece=="0.2.0" transformers=="4.48.2" accelerate=="1.3.0" diffusers=="0.32.2" protobuf=="5.29.3"
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default.