From 13f71e208dc4f7a8a0e195828529a6b796f6e9c8 Mon Sep 17 00:00:00 2001
From: wild-deer <865735712@qq.com>
Date: Tue, 21 Apr 2026 15:44:52 +0800
Subject: [PATCH] add docker feature to the project

---
 .dockerignore      | 13 ++++++++++
 Dockerfile         | 42 +++++++++++++++++++++++++++++++
 README.md          | 63 ++++++++++++++++++++++++++++++++++++++++++++++
 docker-compose.yml | 24 ++++++++++++++++++
 4 files changed, 142 insertions(+)
 create mode 100644 .dockerignore
 create mode 100644 Dockerfile
 create mode 100644 docker-compose.yml

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..a3aba57
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,13 @@
+.git
+.venv
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+*.so
+*.egg-info/
+dist/
+build/
+assets/
+example/
+lingbot-map_paper.pdf
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..93b18a1
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,42 @@
+FROM spxiong/pytorch:2.11.0-py3.10.19-cuda13.0.2-ubuntu22.04
+
+ARG DEBIAN_FRONTEND=noninteractive
+WORKDIR /workspace
+
+ARG UBUNTU_MIRROR=https://mirrors.aliyun.com/ubuntu
+
+RUN set -eux; \
+    find /etc/apt -type f -name "*.list" -print0 | xargs -0 -r sed -i "s|http://archive.ubuntu.com/ubuntu|${UBUNTU_MIRROR}|g"; \
+    find /etc/apt -type f -name "*.list" -print0 | xargs -0 -r sed -i "s|https://archive.ubuntu.com/ubuntu|${UBUNTU_MIRROR}|g"; \
+    find /etc/apt -type f -name "*.list" -print0 | xargs -0 -r sed -i "s|http://security.ubuntu.com/ubuntu|${UBUNTU_MIRROR}|g"; \
+    find /etc/apt -type f -name "*.list" -print0 | xargs -0 -r sed -i "s|https://security.ubuntu.com/ubuntu|${UBUNTU_MIRROR}|g"; \
+    find /etc/apt -type f -name "*.list" -print0 | xargs -0 -r sed -i "/jammy-backports/d"; \
+    for i in 1 2 3; do \
+        apt-get -o Acquire::Retries=5 -o Acquire::http::Timeout=30 update && \
+        apt-get -o Acquire::Retries=5 -o Acquire::http::Timeout=30 install -y --no-install-recommends --fix-missing \
+            ca-certificates \
+            git \
+            ffmpeg \
+            libgl1 \
+            libglib2.0-0 \
+            libgomp1 \
+            libsm6 \
+            libxext6 \
+            libxrender1 \
+        && break; \
+        sleep 5; \
+    done; \
+    rm -rf /var/lib/apt/lists/*
+
+COPY pyproject.toml README.md LICENSE.txt ./
+COPY lingbot_map ./lingbot_map
+COPY demo.py gct_profile.py ./
+
+RUN python -m pip install --no-cache-dir --upgrade pip
+RUN python -m pip install --no-cache-dir flashinfer-python flashinfer-cubin
+RUN python -m pip install --no-cache-dir flashinfer-jit-cache --index-url https://flashinfer.ai/whl/cu130 --no-deps || true
+RUN python -m pip install --no-cache-dir -e ".[vis]"
+
+EXPOSE 8080
+
+CMD ["bash"]
diff --git a/README.md b/README.md
index e0604b9..e72cda4 100644
--- a/README.md
+++ b/README.md
@@ -75,6 +75,69 @@
 pip install flashinfer-python -i https://flashinfer.ai/whl/cu128/torch2.9/
 pip install -e ".[vis]"
 ```
+## Docker / Docker Compose
+
+If you want a quick, reproducible runtime environment (CUDA + dependencies, with optional FlashInfer), you can use the provided [Dockerfile](Dockerfile) and [docker-compose.yml](docker-compose.yml).
+
+### Prerequisites
+
+- Install Docker Desktop (or Docker Engine on Linux).
+- For GPU support:
+  - Linux: install the NVIDIA driver + [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+  - Windows: use the WSL2 backend and make sure `nvidia-smi` works inside WSL
+
+### Build and enter the container
+
+```bash
+docker compose build
+docker compose run --rm --service-ports lingbot-map
+```
+
+Inside the container (default working directory is `/workspace`, and the repo is mounted via a volume), run the demo:
+
+```bash
+python demo.py --model_path /path/to/lingbot-map-long.pt \
+    --image_folder example/church --mask_sky
+```
+
+Open `http://localhost:8080` in your browser (compose maps `8080:8080`).
+
+### Exit and cleanup
+
+```bash
+exit
+docker compose down
+```
+
+(Optional) This compose persists HuggingFace and Torch caches in named volumes: `hf_cache`, `torch_cache`. To remove them:
+
+```bash
+docker volume rm lingbot-map_hf_cache lingbot-map_torch_cache
+```
+
+### Adjust for your CUDA version
+
+The current [Dockerfile](Dockerfile) uses a base image and FlashInfer wheel for **CUDA 13.0** (`cuda13.0.2` / `cu130`). If your machine/driver environment is better matched with CUDA 12.x (more common), update both of the following accordingly:
+
+1) Update the CUDA version in the Docker base image (`FROM ...cudaXX...` on line 1 of `Dockerfile`)
+
+2) Update the FlashInfer wheel index URL (the `--index-url https://flashinfer.ai/whl/cuXXX` line in `Dockerfile`)
+
+Example: switch to **CUDA 12.8** (illustrative; choose a base image and FlashInfer wheel that match your actual PyTorch/CUDA setup)
+
+```diff
+-FROM spxiong/pytorch:2.11.0-py3.10.19-cuda13.0.2-ubuntu22.04
++FROM spxiong/pytorch:2.11.0-py3.10.19-cuda12.8.0-ubuntu22.04
+-RUN python -m pip install --no-cache-dir flashinfer-jit-cache --index-url https://flashinfer.ai/whl/cu130 --no-deps || true
++RUN python -m pip install --no-cache-dir flashinfer-jit-cache --index-url https://flashinfer.ai/whl/cu128 --no-deps || true
+```
+
+
+Notes:
+
+- In `docker-compose.yml`, `gpus: all` exposes all visible GPUs to the container. If you want to use only one GPU, limit the exposed device(s) (syntax varies across Docker/Compose versions).
+- You may need to change the Ubuntu apt mirror depending on your network environment. `Dockerfile` supports the `UBUNTU_MIRROR` build arg (defaults to Aliyun mirrors).
+
 # 📦 Model Download
 
 | Model Name | Huggingface Repository | ModelScope Repository | Description |
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..7a39dc5
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,24 @@
+services:
+  lingbot-map:
+    build:
+      context: .
+    image: lingbot-map:dev
+    working_dir: /workspace
+    volumes:
+      - .:/workspace
+      - hf_cache:/root/.cache/huggingface
+      - torch_cache:/root/.cache/torch
+    environment:
+      - HF_HOME=/root/.cache/huggingface
+      - TORCH_HOME=/root/.cache/torch
+    ports:
+      - "8080:8080"
+    gpus: all
+    shm_size: "32gb"
+    stdin_open: true
+    tty: true
+    command: bash
+
+volumes:
+  hf_cache:
+  torch_cache: