
Commit 254e0d4: initial commit (0 parents)

File tree: 4 files changed, +108 -0 lines

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
hf_download
outputs
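
These two ignore entries keep the Hugging Face model cache and the rendered outputs out of version control; they correspond to the two volumes declared in the Dockerfile below. A minimal check, as a sketch run from the repository root (the mkdir is only needed if the directories do not exist yet):

```
# Sketch: confirm which .gitignore rule matches each directory.
mkdir -p hf_download outputs
git check-ignore -v hf_download outputs
```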

Dockerfile

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
# Base image with CUDA 12.6 (compatible with Torch 2.6's CUDA 12.4 requirements)
FROM nvidia/cuda:12.6.0-devel-ubuntu22.04

# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    VIRTUAL_ENV=/venv \
    PATH="/venv/bin:$PATH" \
    LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH"

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    python3.10 \
    python3.10-venv \
    python3.10-dev \
    libgl1 \
    libglib2.0-0 \
    libsm6 \
    libxrender1 \
    libxext6 \
    ninja-build \
    && rm -rf /var/lib/apt/lists/*

# Create virtual environment
RUN python3.10 -m venv $VIRTUAL_ENV

# Install PyTorch 2.6.0 with CUDA 12.4 compatibility (works with the 12.6 base)
RUN pip install --no-cache-dir \
    torch==2.6.0 \
    torchvision \
    torchaudio \
    --index-url https://download.pytorch.org/whl/cu124

# Clone FramePack (Torch 2.6 compatible version)
RUN git clone https://github.com/lllyasviel/FramePack /app && \
    cd /app

WORKDIR /app

# Install requirements
RUN pip install --no-cache-dir -r requirements.txt

# Install additional dependencies
RUN pip install --no-cache-dir \
    triton==3.0.0 \
    sageattention==1.0.6

# Model directory setup
RUN mkdir -p /app/hf_download && \
    chmod -R 777 /app/hf_download

VOLUME /app/hf_download

# Expose output directory
RUN mkdir -p /app/outputs && \
    chmod -R 777 /app/outputs

VOLUME /app/outputs

# Runtime configuration
EXPOSE 7860
CMD ["python", "demo_gradio.py", "--share"]

README.md

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
## FramePack Docker CUDA

Very easy:

```
git clone https://github.com/akitaonrails/FramePack-Docker-CUDA.git
cd FramePack-Docker-CUDA
mkdir outputs
mkdir hf_download

# Build the image
docker build -t framepack-torch26-cu124:latest .

# Run, mapping the directories outside the container:
docker run -it --rm --gpus all -p 7860:7860 \
  -v ./outputs:/app/outputs \
  -v ./hf_download:/app/hf_download \
  framepack-torch26-cu124:latest
```

The first time it runs, it will download the HunyuanVideo, Flux, and other necessary models. The download is more than 30 GB, so be patient; the models are cached in the externally mapped directory.

When it finishes, access http://localhost:7860 and that's it!
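
The run command above keeps an interactive terminal attached. Since the first start downloads over 30 GB of models, one alternative is to run the container detached and follow the logs; this is a sketch using the same image tag and volume mappings, with the container name `framepack` chosen only for this example:

```
# Sketch: detached run with the same mappings; "framepack" is an arbitrary name.
docker run -d --name framepack --gpus all -p 7860:7860 \
  -v ./outputs:/app/outputs \
  -v ./hf_download:/app/hf_download \
  framepack-torch26-cu124:latest

docker logs -f framepack   # follow the model download and Gradio startup
docker rm -f framepack     # tear down when finished
```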

entrypoint.sh

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
#!/bin/bash
set -e

# Check for required models
MODEL_CHECKLIST=(
    "/app/models/checkpoints/sd_xl_base_1.0.safetensors"
    "/app/models/upscale_models/realesr-general-x4v3.pth"
)

for model in "${MODEL_CHECKLIST[@]}"; do
    if [ ! -f "$model" ]; then
        echo "ERROR: Missing required model: $model"
        echo "Either mount volumes or rebuild image with model downloads"
        exit 1
    fi
done

exec python demo_gradio.py --share --server-name 0.0.0.0
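
Note that the Dockerfile above neither copies this script into the image nor declares it as an ENTRYPOINT; the container starts demo_gradio.py directly through CMD, and the paths under /app/models are not created anywhere in this commit. If you want to exercise the check without rebuilding, one option is to bind-mount the script and override the entrypoint at run time, as in this sketch (it will exit with the error above unless matching model files are also mounted):

```
# Sketch: run the repository's entrypoint.sh without rebuilding the image.
# It exits early unless the model paths it checks for are mounted as well.
chmod +x entrypoint.sh
docker run -it --rm --gpus all -p 7860:7860 \
  -v ./entrypoint.sh:/app/entrypoint.sh \
  -v ./outputs:/app/outputs \
  -v ./hf_download:/app/hf_download \
  --entrypoint /app/entrypoint.sh \
  framepack-torch26-cu124:latest
```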
