-
Notifications
You must be signed in to change notification settings - Fork 102
Expand file tree
/
Copy pathDockerfile
More file actions
220 lines (165 loc) · 7.58 KB
/
Dockerfile
File metadata and controls
220 lines (165 loc) · 7.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
# syntax=docker/dockerfile:1
# Go toolchain version used by all builder stages.
ARG GO_VERSION=1.25
# Tag and hardware variant of the docker-model-backend-llamacpp image the
# llama.cpp server binaries are extracted from.
ARG LLAMA_SERVER_VERSION=latest
ARG LLAMA_SERVER_VARIANT=cpu
# Path inside the llama-server image holding the native binary tree.
# NOTE(review): relies on BuildKit's predefined platform ARG ${TARGETARCH}
# expanding when this default is consumed inside a stage — confirm on non-amd64.
ARG LLAMA_BINARY_PATH=/com.docker.llama-server.native.linux.${LLAMA_SERVER_VARIANT}.${TARGETARCH}
# only 26.04 for cpu variant for max hardware support with vulkan
# use 22.04 for gpu variants to match ROCm/CUDA base images
ARG BASE_IMAGE=ubuntu:26.04
# Version string baked into the model-runner binary via -ldflags (see builder).
ARG VERSION=dev
FROM docker.io/library/golang:${GO_VERSION}-bookworm AS builder
# Redeclare so the global ARG is visible inside this stage.
ARG VERSION
# Install git for go mod download if needed
RUN apt-get update && apt-get install -y --no-install-recommends git && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copy go mod/sum first for better caching
COPY --link go.mod go.sum ./
# Download dependencies (with cache mounts)
RUN --mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
go mod download
# Copy the rest of the source code
COPY --link . .
# Build the Go binary (static build)
# NOTE(review): CGO_ENABLED=1 normally yields a dynamically linked binary,
# which contradicts the "static build" comment above — confirm which is intended.
RUN --mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
CGO_ENABLED=1 GOOS=linux go build -ldflags="-s -w -X main.Version=${VERSION}" -o model-runner .
# Build the Go binary for SGLang (without vLLM)
# Extends the builder stage so the source tree and caches are reused; the
# novllm build tag compiles out vLLM support for the sglang image variant.
FROM builder AS builder-sglang
# Redeclare so the global ARG is visible inside this stage.
ARG VERSION
RUN --mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
CGO_ENABLED=1 GOOS=linux go build -tags=novllm -ldflags="-s -w -X main.Version=${VERSION}" -o model-runner .
# --- Get llama.cpp binary ---
# Donor stage only: the final image copies the native binary tree out of it.
# NOTE(review): LLAMA_SERVER_VERSION defaults to "latest" — not reproducible;
# consider requiring a pinned tag (or digest) for release builds.
FROM docker/docker-model-backend-llamacpp:${LLAMA_SERVER_VERSION}-${LLAMA_SERVER_VARIANT} AS llama-server
# --- Final image ---
# Base runtime stage shared by every variant: non-root user, llama.cpp server
# binary, and runtime environment. The model-runner binary itself is added by
# the final-* stages at the bottom of the file.
FROM docker.io/${BASE_IMAGE} AS llamacpp
# Redeclare so the global ARG is visible inside this stage.
ARG LLAMA_SERVER_VARIANT
# Create non-root user (system account; "video" group for GPU device access)
RUN groupadd --system modelrunner && useradd --system --gid modelrunner -G video --create-home --home-dir /home/modelrunner modelrunner
# TODO: if the render group ever gets a fixed GID add modelrunner to it
COPY scripts/ /scripts/
# Install ca-certificates for HTTPS and vulkan
# NOTE(review): apt-install.sh is not visible in this file — assumed to clean
# up apt lists itself; verify so the layer stays slim.
RUN /scripts/apt-install.sh && rm -rf /scripts
WORKDIR /app
# Create directories for the socket file and llama.cpp binary, and set proper permissions
RUN mkdir -p /var/run/model-runner /app/bin /models && \
chown -R modelrunner:modelrunner /var/run/model-runner /app /models && \
chmod -R 755 /models
# Copy the llama.cpp binary from the llama-server stage
ARG LLAMA_BINARY_PATH
COPY --from=llama-server ${LLAMA_BINARY_PATH}/ /app/.
RUN chmod +x /app/bin/com.docker.llama-server
USER modelrunner
# Set the environment variable for the socket path and LLaMA server binary path
ENV MODEL_RUNNER_SOCK=/var/run/model-runner/model-runner.sock
ENV MODEL_RUNNER_PORT=12434
ENV LLAMA_SERVER_PATH=/app/bin
ENV HOME=/home/modelrunner
ENV MODELS_PATH=/models
# Runtime linker path for the bundled llama.cpp shared libraries.
ENV LD_LIBRARY_PATH=/app/lib
# Label the image so that it's hidden on cloud engines.
LABEL com.docker.desktop.service="model-runner"
ENTRYPOINT ["/app/model-runner"]
# --- vLLM variant ---
# Adds a Python virtualenv with vLLM on top of the shared llamacpp base.
# amd64 installs the official CUDA wheel directly; other arches fall back to
# the PyPI package.
FROM llamacpp AS vllm
ARG VLLM_VERSION=0.12.0
ARG VLLM_CUDA_VERSION=cu130
ARG VLLM_PYTHON_TAG=cp38-abi3
ARG TARGETARCH
USER root
# Use apt-get (stable CLI, hadolint DL3027) with --no-install-recommends to
# keep the layer small; list cleanup stays in the same layer (DL3009).
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        curl \
        python3 \
        python3-dev \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /opt/vllm-env && chown -R modelrunner:modelrunner /opt/vllm-env
USER modelrunner
# Install uv and vLLM as modelrunner user
# NOTE(review): `curl | sh` runs an unpinned installer with no checksum —
# consider pinning the uv version and verifying the download.
RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
    && ~/.local/bin/uv venv --python /usr/bin/python3 /opt/vllm-env \
    && if [ "$TARGETARCH" = "amd64" ]; then \
        WHEEL_ARCH="manylinux_2_31_x86_64"; \
        WHEEL_URL="https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}%2B${VLLM_CUDA_VERSION}-${VLLM_PYTHON_TAG}-${WHEEL_ARCH}.whl"; \
        ~/.local/bin/uv pip install --python /opt/vllm-env/bin/python "$WHEEL_URL"; \
    else \
        ~/.local/bin/uv pip install --python /opt/vllm-env/bin/python "vllm==${VLLM_VERSION}"; \
    fi
# Smoke-test the install and record the resolved version for runtime use.
RUN /opt/vllm-env/bin/python -c "import vllm; print(vllm.__version__)" > /opt/vllm-env/version
# --- SGLang variant ---
# Adds a Python virtualenv with SGLang plus the CUDA toolkit (nvcc is needed
# for flashinfer JIT compilation at runtime).
FROM llamacpp AS sglang
ARG SGLANG_VERSION=0.5.6
USER root
# Install CUDA toolkit 13 for nvcc (needed for flashinfer JIT compilation).
# Use apt-get (stable CLI, hadolint DL3027); --no-install-recommends trims the
# general package set. cuda-toolkit is installed without it since the NVIDIA
# meta-package relies on its full dependency closure.
# NOTE(review): keyring URL hardcodes ubuntu2204/x86_64 — this stage only
# works on amd64 with BASE_IMAGE=ubuntu:22.04 (per the comment at the top of
# the file); consider guarding on TARGETARCH.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    gnupg \
    libnuma-dev \
    libnuma1 \
    ninja-build \
    numactl \
    python3 \
    python3-dev \
    python3-venv \
    wget \
    && wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb \
    && dpkg -i cuda-keyring_1.1-1_all.deb \
    && apt-get update && apt-get install -y cuda-toolkit-13-0 \
    && rm cuda-keyring_1.1-1_all.deb \
    && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /opt/sglang-env && chown -R modelrunner:modelrunner /opt/sglang-env
USER modelrunner
# Set CUDA paths for nvcc (needed during flashinfer compilation)
ENV PATH=/usr/local/cuda-13.0/bin:$PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda-13.0/lib64:$LD_LIBRARY_PATH
# Install uv and SGLang as modelrunner user
# NOTE(review): `curl | sh` runs an unpinned installer with no checksum —
# consider pinning the uv version and verifying the download.
RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
    && ~/.local/bin/uv venv --python /usr/bin/python3 /opt/sglang-env \
    && ~/.local/bin/uv pip install --python /opt/sglang-env/bin/python "sglang==${SGLANG_VERSION}"
# Smoke-test the install and record the resolved version for runtime use.
RUN /opt/sglang-env/bin/python -c "import sglang; print(sglang.__version__)" > /opt/sglang-env/version
# --- Diffusers variant ---
# Adds a Python virtualenv with the Hugging Face diffusers stack plus the
# FastAPI/uvicorn server code shipped under python/diffusers_server.
FROM llamacpp AS diffusers
# Python package versions for reproducible builds
ARG DIFFUSERS_VERSION=0.36.0
ARG TORCH_VERSION=2.9.1
ARG TRANSFORMERS_VERSION=4.57.5
ARG ACCELERATE_VERSION=1.3.0
ARG SAFETENSORS_VERSION=0.5.2
ARG HUGGINGFACE_HUB_VERSION=0.34.0
ARG BITSANDBYTES_VERSION=0.49.1
ARG FASTAPI_VERSION=0.115.12
ARG UVICORN_VERSION=0.34.1
ARG PILLOW_VERSION=11.2.1
USER root
# Use apt-get (stable CLI, hadolint DL3027) with --no-install-recommends to
# keep the layer small; list cleanup stays in the same layer (DL3009).
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    python3 \
    python3-dev \
    python3-venv \
    && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /opt/diffusers-env && chown -R modelrunner:modelrunner /opt/diffusers-env
USER modelrunner
# Install uv and diffusers as modelrunner user
# NOTE(review): `curl | sh` runs an unpinned installer with no checksum —
# consider pinning the uv version and verifying the download.
RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
    && ~/.local/bin/uv venv --python /usr/bin/python3 /opt/diffusers-env \
    && ~/.local/bin/uv pip install --python /opt/diffusers-env/bin/python \
    "diffusers==${DIFFUSERS_VERSION}" \
    "torch==${TORCH_VERSION}" \
    "transformers==${TRANSFORMERS_VERSION}" \
    "accelerate==${ACCELERATE_VERSION}" \
    "safetensors==${SAFETENSORS_VERSION}" \
    "huggingface_hub==${HUGGINGFACE_HUB_VERSION}" \
    "bitsandbytes==${BITSANDBYTES_VERSION}" \
    "fastapi==${FASTAPI_VERSION}" \
    "uvicorn[standard]==${UVICORN_VERSION}" \
    "pillow==${PILLOW_VERSION}"
# Copy Python server code into the venv's site-packages (needs root for the
# chown; ownership is handed back to modelrunner in the same layer).
USER root
COPY python/diffusers_server /tmp/diffusers_server/
RUN PYTHON_SITE_PACKAGES=$(/opt/diffusers-env/bin/python -c "import site; print(site.getsitepackages()[0])") && \
    mkdir -p "$PYTHON_SITE_PACKAGES/diffusers_server" && \
    cp -r /tmp/diffusers_server/* "$PYTHON_SITE_PACKAGES/diffusers_server/" && \
    chown -R modelrunner:modelrunner "$PYTHON_SITE_PACKAGES/diffusers_server/" && \
    rm -rf /tmp/diffusers_server
USER modelrunner
# Smoke-test the install and record the resolved version for runtime use.
RUN /opt/diffusers-env/bin/python -c "import diffusers; print(diffusers.__version__)" > /opt/diffusers-env/version
# --- Final build targets ---
# Each final-* stage layers the model-runner binary onto its variant base;
# select one with `docker build --target final-<variant>`.
FROM llamacpp AS final-llamacpp
# Copy the built binary from builder
COPY --from=builder /app/model-runner /app/model-runner
FROM vllm AS final-vllm
# Copy the built binary from builder
COPY --from=builder /app/model-runner /app/model-runner
FROM sglang AS final-sglang
# Copy the built binary from builder-sglang (without vLLM)
COPY --from=builder-sglang /app/model-runner /app/model-runner
FROM diffusers AS final-diffusers
# Copy the built binary from builder (with diffusers support)
COPY --from=builder /app/model-runner /app/model-runner