1 change: 1 addition & 0 deletions .dockerignore
@@ -3,3 +3,4 @@ Dockerfile
!.git
!.gitignore
!.gitmodules
!.python-version
32 changes: 20 additions & 12 deletions .github/workflows/ci.yml
@@ -19,16 +19,24 @@ jobs:
- name: Build Docker image
run: docker build --tag gpudrive:latest .

- name: Run smoke test inside Docker container
- name: Run smoke test inside Docker container (using a here document)
run: |
docker run --rm gpudrive:latest /bin/bash -c "
echo 'Dummy cuda' \
&& ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 \
&& export LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs/:$LD_LIBRARY_PATH \
&& echo 'Modifications to run without extra data' \
&& sed -i 's|train_path: \"data/processed/training\"|train_path: \"data/processed/examples\"|g' examples/experimental/config/visualization_config.yaml \
&& sed -i '/# Load policy/{N;N;N;N;N;N;s|# Load policy\n policy = load_policy(\n path_to_cpt=config.cpt_path,\n model_name=config.cpt_name,\n device=config.device,\n env=env,\n )|from gpudrive.networks.late_fusion import NeuralNet\n policy = NeuralNet.from_pretrained(\"daphne-cornelisse/policy_S10_000_02_27\")|}' examples/experimental/viz_rollouts.py \
&& echo 'Modifications to run without GPU' \
&& sed -i 's/device=\"cuda\"/device=\"cpu\"/g' gpudrive/datatypes/observation.py \
&& python examples/experimental/viz_rollouts.py
"
docker run --rm gpudrive:latest /bin/bash -c "$(cat << 'EOF'
#!/bin/bash
set -e
set -x
export DEBIAN_FRONTEND=noninteractive
echo 'Dummy cuda'
ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1
export LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs/:$LD_LIBRARY_PATH
echo 'Modifications to run without extra data'
sed -i 's|train_path: \"data/processed/training\"|train_path: \"data/processed/examples\"|g' examples/experimental/config/visualization_config.yaml
sed -i '/# Load policy/{N;N;N;N;N;N;s|# Load policy\n policy = load_policy(\n path_to_cpt=config.cpt_path,\n model_name=config.cpt_name,\n device=config.device,\n env=env,\n )|from gpudrive.networks.late_fusion import NeuralNet\n policy = NeuralNet.from_pretrained(\"daphne-cornelisse/policy_S10_000_02_27\")|}' examples/experimental/viz_rollouts.py
echo 'Modifications to run without GPU'
sed -i 's/device=\"cuda\"/device=\"cpu\"/g' gpudrive/datatypes/observation.py
export PYTHONFAULTHANDLER=1
ulimit -c unlimited
echo 'Run smoketest'
/gpudrive/.venv/bin/python examples/experimental/viz_rollouts.py
EOF
)"
7 changes: 1 addition & 6 deletions .gitignore
@@ -8,6 +8,7 @@
.vscode/launch.json
.vscode/settings.json
.vscode/tasks.json
.uv_cache

/examples/benchmarks/results/
/baselines/ppo/logs/*
@@ -17,7 +18,6 @@
*madrona.diff
/bin
/zipp*
.python-version
/google-cloud-sdk/*
*.gz

@@ -155,11 +155,6 @@ target/
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
2 changes: 1 addition & 1 deletion .gitmodules
@@ -1,6 +1,6 @@
[submodule "external/madrona"]
path = external/madrona
url = https://github.com/shacklettbp/madrona.git
url = https://github.com/m-naumann/madrona.git
[submodule "external/json"]
path = external/json
url = https://github.com/nlohmann/json.git
1 change: 1 addition & 0 deletions .python-version
@@ -0,0 +1 @@
3.11
3 changes: 1 addition & 2 deletions CMakeLists.txt
@@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
cmake_policy(VERSION 3.18)
cmake_minimum_required(VERSION 3.24 FATAL_ERROR) # for madrona

include("${CMAKE_CURRENT_SOURCE_DIR}/external/madrona/cmake/madrona_init.cmake")

31 changes: 9 additions & 22 deletions Dockerfile
@@ -1,5 +1,5 @@
# Base image with CUDA and cuDNN support
FROM nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04
FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04

ARG DEBIAN_FRONTEND=noninteractive

@@ -27,39 +27,26 @@ RUN apt-get update && apt-get install -y -q --no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# Install Python 3.11
RUN apt-add-repository -y ppa:deadsnakes/ppa \
&& apt-get install -y -q --no-install-recommends python3.11 python3.11-dev python3.11-distutils \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11

# Set Python 3.11 as default
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 11 && \
update-alternatives --install /usr/bin/python python /usr/bin/python3.11 11

RUN apt-get remove -y cmake && pip3 install --no-cache-dir --upgrade cmake
# Install uv
RUN curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="/usr/bin" sh

# Copy the gpudrive repository
COPY . /gpudrive
WORKDIR /gpudrive
RUN git submodule update --init --recursive --depth 1
RUN git submodule update --init --recursive

# Install python part using uv
RUN uv sync --frozen

ENV MADRONA_MWGPU_KERNEL_CACHE=./gpudrive_cache

RUN mkdir build
WORKDIR /gpudrive/build
RUN cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_POLICY_VERSION_MINIMUM=3.5 && find external -type f -name "*.tar" -delete
RUN uv run cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_POLICY_VERSION_MINIMUM=3.5 && find external -type f -name "*.tar" -delete
RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1
RUN LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs/:$LD_LIBRARY_PATH make -j
RUN LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs/:$LD_LIBRARY_PATH uv run make -j
RUN rm /usr/local/cuda/lib64/stubs/libcuda.so.1
WORKDIR /gpudrive

RUN pip3 install --no-cache-dir torch==2.6.0 && rm -rf ~/.cache/pip/*
RUN pip3 install --no-cache-dir tensorflow==2.19.0 && rm -rf ~/.cache/pip/*
RUN pip3 install --no-cache-dir nvidia-cuda-runtime-cu12==12.4.127 && rm -rf ~/.cache/pip/*
RUN pip3 install --no-cache-dir -e .[vbd,pufferlib]

CMD ["/bin/bash"]
LABEL org.opencontainers.image.source=https://github.com/Emerge-Lab/gpudrive
39 changes: 25 additions & 14 deletions README.md
@@ -1,7 +1,10 @@
GPUDrive
========

![Python version](https://img.shields.io/badge/Python-3.11-blue) [![Paper](https://img.shields.io/badge/arXiv-2408.01584-b31b1b.svg)](https://arxiv.org/abs/2408.01584)
[![Paper](https://img.shields.io/badge/arXiv-2408.01584-b31b1b.svg)](https://arxiv.org/abs/2408.01584)
[![GitHub CI](https://github.com/Emerge-Lab/gpudrive/actions/workflows/ci.yml/badge.svg)](https://github.com/Emerge-Lab/gpudrive/actions/workflows/ci.yml)
[![License](https://img.shields.io/github/license/Emerge-Lab/gpudrive)](LICENSE)
![Python version](https://img.shields.io/badge/Python-3.11-blue)

An extremely fast, data-driven driving simulator written in C++.

@@ -66,20 +69,28 @@ For Windows, open the cloned repository in Visual Studio and build the project u

Next, set up a Python environment

#### With pyenv (Recommended)
#### With uv (Recommended)

Create a virtual environment:
Create a virtual environment and install the Python components of the repository:

```bash
pyenv virtualenv 3.11 gpudrive
pyenv activate gpudrive
uv sync --frozen
```

Set it for the current project directory (optional):
#### With pyenv

```bash
pyenv local gpudrive
```
Create a virtual environment:

```bash
pyenv virtualenv 3.11 gpudrive
pyenv activate gpudrive
```

Set it for the current project directory (optional):

```bash
pyenv local gpudrive
```

#### With conda

@@ -90,18 +101,18 @@ conda activate gpudrive

### Install Python package

Finally, install the Python components of the repository using pip:
Finally, install the Python components of the repository using pip (this step is not required for the `uv` installation):

```bash
# macOS and Linux.
pip install -e .
```

Optional depencies include [pufferlib], [sb3], [vbd], and [tests].
Dependency-groups include `pufferlib`, `sb3`, `vbd`, and `tests`.

```bash
# On Windows.
pip install -e . -Cpackages.madrona_escape_room.ext-out-dir=PATH_TO_YOUR_BUILD_DIR on Windows
pip install -e . -Cpackages.madrona_escape_room.ext-out-dir=<PATH_TO_YOUR_BUILD_DIR on Windows>
```

</details>
@@ -124,14 +135,14 @@ Ensure you have the following installed:
Once installed, you can build the container with:

```bash
DOCKER_BUILDKIT=1 docker build --build-arg USE_CUDA=true --tag my_image:latest --progress=plain .
DOCKER_BUILDKIT=1 docker build --build-arg USE_CUDA=true --tag gpudrive:latest --progress=plain .
```

### Running the Container
To run the container with GPU support and shared memory:

```bash
docker run --gpus all -it --rm --shm-size=20G -v ${PWD}:/workspace my_image:latest /bin/bash
docker run --gpus all -it --rm --shm-size=20G -v ${PWD}:/workspace gpudrive:latest /bin/bash
```

</details>
1 change: 1 addition & 0 deletions baselines/ppo/ppo_pufferlib.py
@@ -291,6 +291,7 @@ def run(
else config.environment.k_unique_scenes,
sample_with_replacement=config.train.sample_with_replacement,
shuffle=config.train.shuffle_dataset,
seed=seed,
)

# Make environment
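
The new `seed` argument threads the run seed into the dataset sampler so that scene shuffling and sampling with replacement are reproducible across runs. A minimal, hypothetical sketch of the idea (the class and argument names below are illustrative, not the repository's actual loader):

```python
import random

class SceneSampler:
    """Illustrative only: deterministic scene selection given a seed."""

    def __init__(self, scenes, k, sample_with_replacement=False, shuffle=True, seed=0):
        self.rng = random.Random(seed)  # route all randomness through one seeded RNG
        scenes = list(scenes)
        if shuffle:
            self.rng.shuffle(scenes)
        if sample_with_replacement:
            self.batch = [self.rng.choice(scenes) for _ in range(k)]
        else:
            self.batch = self.rng.sample(scenes, k)

# Identical seeds yield identical scene batches on every run.
assert SceneSampler(range(100), k=4, seed=42).batch == SceneSampler(range(100), k=4, seed=42).batch
```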
4 changes: 2 additions & 2 deletions data_utils/process_waymo_files.py
@@ -624,15 +624,15 @@ def process_data(args):

if args.dataset == "all":
datasets = ["training", "validation", "testing"]
elif args.dataset == "train":
elif args.dataset == "training":
datasets = ["training"]
elif args.dataset == "validation":
datasets = ["validation"]
elif args.dataset == "testing":
datasets = ["testing"]
else:
raise ValueError(
"Invalid dataset name. Must be one of: 'all', 'train', 'validation', or 'testing'"
"Invalid dataset name. Must be one of: 'all', 'training', 'validation', or 'testing'"
)

for dataset in datasets:
2 changes: 1 addition & 1 deletion external/madrona
34 changes: 27 additions & 7 deletions pyproject.toml
@@ -5,7 +5,7 @@ requires = [
"cmake>=3.18",
"ninja",
]
build-backend = "madrona-py-build"
build-backend = "madrona_py_build"
backend-path = ["external/madrona/py"]
wheel-directory = "build"

@@ -22,7 +22,7 @@ readme = "README.md"
description = "A GPU-accelerated, multi-agent driving simulator"
requires-python = ">=3.11"
dependencies = [
"numpy>=1.26.4",
"numpy>=1.26.4,<2",
"gymnasium",
"pygame",
"matplotlib==3.9",
@@ -39,26 +39,46 @@ dependencies = [
"tqdm",
"jax",
"huggingface_hub",
"cmake==4.0.0",
]

[project.optional-dependencies]
[dependency-groups]
pufferlib = [
"pufferlib>=2.0.6",
"pufferlib>=2.0.6,<3",
]
sb3 = [
"stable-baselines3==2.3.2",
]

test = [
"pytest>=8.2.1",
]

vbd = [
"lightning",
"jaxlib",
"jaxlib==0.5.3", # see https://github.com/Emerge-Lab/gpudrive/issues/464
"waymo-waymax @ git+https://github.com/waymo-research/waymax.git@main",
]

[tool.uv]
default-groups = "all"
cache-dir = "./.uv_cache"

# Use the CUDA index for torch on Linux
[[tool.uv.index]]
name = "pytorch-cuda"
url = "https://download.pytorch.org/whl/cu124"
explicit = true

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.uv.sources]
torch = [
{ index = "pytorch-cuda", marker = "platform_system == 'Linux'" },
{ index = "pytorch-cpu", marker = "platform_system != 'Linux'" }
]

[tool.madrona.packages.madrona_gpudrive]
ext-only = true
ext-out-dir = "build"
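
The `[[tool.uv.index]]` entries and the `[tool.uv.sources]` table pin `torch` to the CUDA 12.4 wheel index on Linux and to the CPU index everywhere else. A quick, optional way to confirm which build `uv sync` resolved (not part of the repository):

```python
# Check which torch wheel was resolved (CUDA build on Linux, CPU build elsewhere).
import torch

print(torch.__version__)          # local version tag typically ends in "+cu124" or "+cpu"
print(torch.version.cuda)         # CUDA toolkit the wheel targets, or None for CPU wheels
print(torch.cuda.is_available())  # True only with a working GPU and driver at runtime
```

Running it via `uv run python ...` ensures the project's synced virtual environment is the one being inspected.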
7 changes: 7 additions & 0 deletions src/level_gen.cpp
@@ -318,6 +318,13 @@ void createPaddingEntities(Engine &ctx) {
}
auto &self_obs = ctx.get<SelfObservation>(agent_iface);
self_obs = SelfObservation::zero();

auto &abs_self_obs = ctx.get<AbsoluteSelfObservation>(agent_iface);
abs_self_obs.position = Vector3::zero();
abs_self_obs.rotation = AbsoluteRotation{.rotationAsQuat = Quat{1, 0, 0, 0}, .rotationFromAxis = 0};
abs_self_obs.goal = Goal{.position = {0, 0}};
abs_self_obs.vehicle_size = VehicleSize{.length = 0, .width = 0, .height = 0};
abs_self_obs.id = -1.0f;

auto &partner_obs = ctx.get<PartnerObservations>(agent_iface);
for (CountT i = 0; i < consts::kMaxAgentCount-1; i++) {
1 change: 1 addition & 0 deletions src/level_gen.hpp
@@ -50,6 +50,7 @@ void destroyWorld(Engine &ctx);
{
ctx.get<Position>(road) = pos;
ctx.get<Rotation>(road) = rot;
ctx.get<Velocity>(road) = Velocity{madrona::math::Vector3::zero(), madrona::math::Vector3::zero()};
ctx.get<Scale>(road) = scale;
ctx.get<EntityType>(road) = type;
ctx.get<ObjectID>(road) = objId;