Skip to content

Commit 860b40d

Browse files
committed
refactor(backends): standardize status reporting across all backends
Signed-off-by: Dorin Geman <dorin.geman@docker.com>
1 parent 9e75f04 commit 860b40d

File tree

11 files changed

+194
-45
lines changed

11 files changed

+194
-45
lines changed

cmd/cli/commands/status.go

Lines changed: 57 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,20 @@
11
package commands
22

33
import (
4+
"bytes"
45
"encoding/json"
56
"fmt"
67
"net"
78
"os"
9+
"sort"
810
"strconv"
911

1012
"github.com/docker/cli/cli-plugins/hooks"
1113
"github.com/docker/model-runner/cmd/cli/commands/completion"
1214
"github.com/docker/model-runner/cmd/cli/desktop"
1315
"github.com/docker/model-runner/cmd/cli/pkg/standalone"
1416
"github.com/docker/model-runner/cmd/cli/pkg/types"
17+
"github.com/docker/model-runner/pkg/inference"
1518
"github.com/spf13/cobra"
1619
)
1720

@@ -56,19 +59,67 @@ func newStatusCmd() *cobra.Command {
5659
func textStatus(cmd *cobra.Command, status desktop.Status, backendStatus map[string]string) {
5760
if status.Running {
5861
cmd.Println("Docker Model Runner is running")
59-
cmd.Println("\nStatus:")
60-
for b, s := range backendStatus {
61-
if s != "not running" {
62-
cmd.Println(b+":", s)
63-
}
64-
}
62+
cmd.Println()
63+
cmd.Print(backendStatusTable(backendStatus))
6564
} else {
6665
cmd.Println("Docker Model Runner is not running")
6766
hooks.PrintNextSteps(cmd.OutOrStdout(), []string{enableViaCLI, enableViaGUI})
6867
osExit(1)
6968
}
7069
}
7170

71+
func backendStatusTable(backendStatus map[string]string) string {
72+
var buf bytes.Buffer
73+
table := newTable(&buf)
74+
table.Header([]string{"BACKEND", "STATUS", "DETAILS"})
75+
76+
type backendInfo struct {
77+
name string
78+
statusType string
79+
details string
80+
sortOrder int
81+
}
82+
83+
backends := make([]backendInfo, 0, len(backendStatus))
84+
for name, statusText := range backendStatus {
85+
statusType, details := inference.ParseStatus(statusText)
86+
87+
// Assign sort order: Running < Error < Not Installed < Installing
88+
sortOrder := 4
89+
switch statusType {
90+
case inference.StatusRunning:
91+
sortOrder = 0
92+
case inference.StatusError:
93+
sortOrder = 1
94+
case inference.StatusNotInstalled:
95+
sortOrder = 2
96+
case inference.StatusInstalling:
97+
sortOrder = 3
98+
}
99+
100+
backends = append(backends, backendInfo{
101+
name: name,
102+
statusType: statusType,
103+
details: details,
104+
sortOrder: sortOrder,
105+
})
106+
}
107+
108+
sort.Slice(backends, func(i, j int) bool {
109+
if backends[i].sortOrder != backends[j].sortOrder {
110+
return backends[i].sortOrder < backends[j].sortOrder
111+
}
112+
return backends[i].name < backends[j].name
113+
})
114+
115+
for _, backend := range backends {
116+
table.Append([]string{backend.name, backend.statusType, backend.details})
117+
}
118+
119+
table.Render()
120+
return buf.String()
121+
}
122+
72123
func makeEndpoint(host string, port int) string {
73124
return "http://" + net.JoinHostPort(host, strconv.Itoa(port)) + "/v1/"
74125
}

pkg/inference/backend.go

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import (
55
"encoding/json"
66
"fmt"
77
"net/http"
8+
"strings"
89
"time"
910
)
1011

@@ -24,6 +25,93 @@ const (
2425
BackendModeImageGeneration
2526
)
2627

28+
// Backend status constants for standardized status reporting.
29+
// Backends should use these prefixes when reporting their status.
30+
const (
31+
// StatusRunning indicates the backend is operational and ready.
32+
// Format: "Running: <details>" (e.g., "Running: vllm v0.1.0")
33+
StatusRunning = "Running"
34+
35+
// StatusError indicates the backend encountered an error.
36+
// Format: "Error: <details>" (e.g., "Error: installation failed")
37+
StatusError = "Error"
38+
39+
// StatusNotInstalled indicates the backend is not installed.
40+
// Format: "Not Installed: <details>" or just "Not Installed"
41+
StatusNotInstalled = "Not Installed"
42+
43+
// StatusInstalling indicates the backend is currently being installed.
44+
// Format: "Installing: <details>" or just "Installing"
45+
StatusInstalling = "Installing"
46+
)
47+
48+
// Common status detail messages for consistent reporting across backends.
49+
const (
50+
DetailBinaryNotFound = "binary not found"
51+
DetailPackageNotInstalled = "package not installed"
52+
DetailImportFailed = "import failed"
53+
DetailVersionUnknown = "version unknown"
54+
DetailPythonNotFound = "Python not found"
55+
DetailOnlyLinux = "only supported on Linux"
56+
DetailOnlyAppleSilicon = "only supported on Apple Silicon"
57+
DetailDownloading = "downloading"
58+
DetailCheckingForUpdates = "checking for updates"
59+
)
60+
61+
// FormatStatus formats a backend status with optional details.
62+
// If details is empty, returns just the status type.
63+
// Otherwise, returns "Status: details".
64+
func FormatStatus(statusType, details string) string {
65+
if details == "" {
66+
return statusType
67+
}
68+
return statusType + ": " + details
69+
}
70+
71+
// ParseStatus splits a formatted status string into type and details.
72+
// Returns the status type and details separately.
73+
func ParseStatus(status string) (statusType, details string) {
74+
if status == "" {
75+
return StatusNotInstalled, ""
76+
}
77+
78+
for _, prefix := range []string{StatusRunning, StatusError, StatusNotInstalled, StatusInstalling} {
79+
if status == prefix {
80+
return prefix, ""
81+
}
82+
if details, found := strings.CutPrefix(status, prefix+": "); found {
83+
return prefix, details
84+
}
85+
}
86+
87+
return StatusError, status
88+
}
89+
90+
// FormatRunning formats a running status with version/details.
91+
// Example: FormatRunning("vllm 0.1.0") -> "Running: vllm 0.1.0"
92+
func FormatRunning(details string) string {
93+
return FormatStatus(StatusRunning, details)
94+
}
95+
96+
// FormatError formats an error status with error message.
97+
// Example: FormatError("installation failed") -> "Error: installation failed"
98+
func FormatError(details string) string {
99+
return FormatStatus(StatusError, details)
100+
}
101+
102+
// FormatNotInstalled formats a not installed status with optional details.
103+
// Example: FormatNotInstalled("package not found") -> "Not Installed: package not found"
104+
// Example: FormatNotInstalled("") -> "Not Installed"
105+
func FormatNotInstalled(details string) string {
106+
return FormatStatus(StatusNotInstalled, details)
107+
}
108+
109+
// FormatInstalling formats an installing status with optional details.
110+
// Example: FormatInstalling("downloading") -> "Installing: downloading"
111+
func FormatInstalling(details string) string {
112+
return FormatStatus(StatusInstalling, details)
113+
}
114+
27115
type ErrGGUFParse struct {
28116
Err error
29117
}

pkg/inference/backends/diffusers/diffusers.go

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ func New(log logging.Logger, modelManager *models.Manager, serverLog logging.Log
6363
modelManager: modelManager,
6464
serverLog: serverLog,
6565
config: conf,
66-
status: "not installed",
66+
status: inference.FormatNotInstalled(""),
6767
customPythonPath: customPythonPath,
6868
}, nil
6969
}
@@ -88,6 +88,7 @@ func (d *diffusers) UsesTCP() bool {
8888
// Install implements inference.Backend.Install.
8989
func (d *diffusers) Install(_ context.Context, _ *http.Client) error {
9090
if !platform.SupportsDiffusers() {
91+
d.status = inference.FormatNotInstalled(inference.DetailOnlyLinux)
9192
return ErrNotImplemented
9293
}
9394

@@ -104,7 +105,7 @@ func (d *diffusers) Install(_ context.Context, _ *http.Client) error {
104105
// Fall back to system Python
105106
systemPython, err := exec.LookPath("python3")
106107
if err != nil {
107-
d.status = ErrPythonNotFound.Error()
108+
d.status = inference.FormatError(inference.DetailPythonNotFound)
108109
return ErrPythonNotFound
109110
}
110111
pythonPath = systemPython
@@ -115,7 +116,7 @@ func (d *diffusers) Install(_ context.Context, _ *http.Client) error {
115116

116117
// Check if diffusers is installed
117118
if err := d.pythonCmd("-c", "import diffusers").Run(); err != nil {
118-
d.status = "diffusers package not installed"
119+
d.status = inference.FormatNotInstalled(inference.DetailPackageNotInstalled)
119120
d.log.Warnf("diffusers package not found. Install with: uv pip install diffusers torch")
120121
return ErrDiffusersNotFound
121122
}
@@ -124,9 +125,9 @@ func (d *diffusers) Install(_ context.Context, _ *http.Client) error {
124125
output, err := d.pythonCmd("-c", "import diffusers; print(diffusers.__version__)").Output()
125126
if err != nil {
126127
d.log.Warnf("could not get diffusers version: %v", err)
127-
d.status = "running diffusers version: unknown"
128+
d.status = inference.FormatRunning(inference.DetailVersionUnknown)
128129
} else {
129-
d.status = fmt.Sprintf("running diffusers version: %s", strings.TrimSpace(string(output)))
130+
d.status = inference.FormatRunning(fmt.Sprintf("diffusers %s", strings.TrimSpace(string(output))))
130131
}
131132

132133
return nil

pkg/inference/backends/llamacpp/download.go

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import (
1515
"strings"
1616
"sync"
1717

18+
"github.com/docker/model-runner/pkg/inference"
1819
"github.com/docker/model-runner/pkg/internal/dockerhub"
1920
"github.com/docker/model-runner/pkg/logging"
2021
)
@@ -100,8 +101,7 @@ func (l *llamaCpp) downloadLatestLlamaCpp(ctx context.Context, log logging.Logge
100101
if err != nil {
101102
return fmt.Errorf("failed to read bundled llama.cpp version: %w", err)
102103
} else if strings.TrimSpace(string(data)) == latest {
103-
l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s",
104-
desiredTag, latest, getLlamaCppVersion(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server")))
104+
l.setRunningStatus(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server"), desiredTag, latest)
105105
return errLlamaCppUpToDate
106106
}
107107

@@ -112,8 +112,7 @@ func (l *llamaCpp) downloadLatestLlamaCpp(ctx context.Context, log logging.Logge
112112
} else if strings.TrimSpace(string(data)) == latest {
113113
log.Infoln("current llama.cpp version is already up to date")
114114
if _, statErr := os.Stat(llamaCppPath); statErr == nil {
115-
l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s",
116-
desiredTag, latest, getLlamaCppVersion(log, llamaCppPath))
115+
l.setRunningStatus(log, llamaCppPath, desiredTag, latest)
117116
return nil
118117
}
119118
log.Infoln("llama.cpp binary must be updated, proceeding to update it")
@@ -128,7 +127,7 @@ func (l *llamaCpp) downloadLatestLlamaCpp(ctx context.Context, log logging.Logge
128127
}
129128
defer os.RemoveAll(downloadDir)
130129

131-
l.status = fmt.Sprintf("downloading %s (%s) variant of llama.cpp", desiredTag, latest)
130+
l.status = inference.FormatInstalling(fmt.Sprintf("%s llama.cpp %s", inference.DetailDownloading, desiredTag))
132131
if extractErr := extractFromImage(ctx, log, image, runtime.GOOS, runtime.GOARCH, downloadDir); extractErr != nil {
133132
return fmt.Errorf("could not extract image: %w", extractErr)
134133
}
@@ -164,7 +163,7 @@ func (l *llamaCpp) downloadLatestLlamaCpp(ctx context.Context, log logging.Logge
164163
}
165164

166165
log.Infoln("successfully updated llama.cpp binary")
167-
l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s", desiredTag, latest, getLlamaCppVersion(log, llamaCppPath))
166+
l.setRunningStatus(log, llamaCppPath, desiredTag, latest)
168167
log.Infoln(l.status)
169168

170169
if err := os.WriteFile(currentVersionFile, []byte(latest), 0o644); err != nil {
@@ -188,6 +187,15 @@ func extractFromImage(ctx context.Context, log logging.Logger, image, requiredOs
188187
return dockerhub.Extract(imageTar, requiredArch, requiredOs, destination)
189188
}
190189

190+
func (l *llamaCpp) setRunningStatus(log logging.Logger, binaryPath, variant, digest string) {
191+
version := getLlamaCppVersion(log, binaryPath)
192+
if variant == "" && digest == "" {
193+
l.status = inference.FormatRunning(fmt.Sprintf("llama.cpp %s", version))
194+
} else {
195+
l.status = inference.FormatRunning(fmt.Sprintf("llama.cpp %s (%s) %s", variant, digest, version))
196+
}
197+
}
198+
191199
func getLlamaCppVersion(log logging.Logger, llamaCpp string) string {
192200
output, err := exec.Command(llamaCpp, "--version").CombinedOutput()
193201
if err != nil {

pkg/inference/backends/llamacpp/download_linux.go

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ package llamacpp
22

33
import (
44
"context"
5-
"fmt"
65
"net/http"
76
"path/filepath"
87

@@ -12,7 +11,6 @@ import (
1211
func (l *llamaCpp) ensureLatestLlamaCpp(_ context.Context, log logging.Logger, _ *http.Client,
1312
_, vendoredServerStoragePath string,
1413
) error {
15-
l.status = fmt.Sprintf("running llama.cpp version: %s",
16-
getLlamaCppVersion(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server")))
14+
l.setRunningStatus(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server"), "", "")
1715
return errLlamaCppUpdateDisabled
1816
}

pkg/inference/backends/llamacpp/download_windows.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"path/filepath"
88
"runtime"
99

10+
"github.com/docker/model-runner/pkg/inference"
1011
"github.com/docker/model-runner/pkg/logging"
1112
)
1213

@@ -23,13 +24,13 @@ func (l *llamaCpp) ensureLatestLlamaCpp(ctx context.Context, log logging.Logger,
2324
case "amd64":
2425
canUseCUDA11, err = hasCUDA11CapableGPU(ctx, nvGPUInfoBin)
2526
if err != nil {
26-
l.status = fmt.Sprintf("failed to check CUDA 11 capability: %v", err)
27+
l.status = inference.FormatError(fmt.Sprintf("failed to check CUDA 11 capability: %v", err))
2728
return fmt.Errorf("failed to check CUDA 11 capability: %w", err)
2829
}
2930
case "arm64":
3031
canUseOpenCL, err = hasOpenCL()
3132
if err != nil {
32-
l.status = fmt.Sprintf("failed to check OpenCL capability: %v", err)
33+
l.status = inference.FormatError(fmt.Sprintf("failed to check OpenCL capability: %v", err))
3334
return fmt.Errorf("failed to check OpenCL capability: %w", err)
3435
}
3536
}
@@ -41,7 +42,7 @@ func (l *llamaCpp) ensureLatestLlamaCpp(ctx context.Context, log logging.Logger,
4142
} else if canUseOpenCL {
4243
desiredVariant = "opencl"
4344
}
44-
l.status = fmt.Sprintf("looking for updates for %s variant", desiredVariant)
45+
l.status = inference.FormatInstalling(fmt.Sprintf("%s llama.cpp %s", inference.DetailCheckingForUpdates, desiredVariant))
4546
return l.downloadLatestLlamaCpp(ctx, log, httpClient, llamaCppPath, vendoredServerStoragePath, desiredVersion,
4647
desiredVariant)
4748
}

pkg/inference/backends/llamacpp/llamacpp.go

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -109,8 +109,6 @@ func (l *llamaCpp) Install(ctx context.Context, httpClient *http.Client) error {
109109
llamaServerBin = "com.docker.llama-server.exe"
110110
}
111111

112-
l.status = "installing"
113-
114112
// Temporary workaround for dynamically downloading llama.cpp from Docker Hub.
115113
// Internet access and an available docker/docker-model-backend-llamacpp:latest on Docker Hub are required.
116114
// Even if docker/docker-model-backend-llamacpp:latest has been downloaded before, we still require its
@@ -119,7 +117,7 @@ func (l *llamaCpp) Install(ctx context.Context, httpClient *http.Client) error {
119117
if err := l.ensureLatestLlamaCpp(ctx, l.log, httpClient, llamaCppPath, l.vendoredServerStoragePath); err != nil {
120118
l.log.Infof("failed to ensure latest llama.cpp: %v\n", err)
121119
if !errors.Is(err, errLlamaCppUpToDate) && !errors.Is(err, errLlamaCppUpdateDisabled) {
122-
l.status = fmt.Sprintf("failed to install llama.cpp: %v", err)
120+
l.status = inference.FormatError(fmt.Sprintf("failed to install llama.cpp: %v", err))
123121
}
124122
if errors.Is(err, context.Canceled) {
125123
return err

0 commit comments

Comments (0)