From 9ccaf67def185a16471cea7ebc4137bd766fa97e Mon Sep 17 00:00:00 2001
From: mashie20 <37959328+mashie20@users.noreply.github.com>
Date: Mon, 10 Feb 2025 22:23:27 -0800
Subject: [PATCH] llama Update docker-compose.yml

Pull every model reported by `ollama list` instead of hard-coding a
single embed model (nomic-embed-text) and llama3.1.

Notes on the command:
- The real subcommand is `ollama list` (`list-models` does not exist);
  it reports the models already present in the ollama volume, so this
  refreshes/updates existing models rather than discovering new ones.
- `OLLAMA_HOST=ollama:11434` is kept on every invocation so the init
  container talks to the `ollama` service, not localhost.
- `$1` is escaped as `$$1` so docker compose does not interpolate it
  before awk sees it.
- `xargs -r` skips the pull entirely (and the `|| echo` reports it)
  when no models are listed.
---
 local-ai-packaged/docker-compose.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/local-ai-packaged/docker-compose.yml b/local-ai-packaged/docker-compose.yml
index 7424816d..2cfeb8e7 100644
--- a/local-ai-packaged/docker-compose.yml
+++ b/local-ai-packaged/docker-compose.yml
@@ -43,7 +43,7 @@ x-init-ollama: &init-ollama
   entrypoint: /bin/sh
   command:
     - "-c"
-    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull llama3.1; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
+    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama list | awk 'NR>1 {print $$1}' | xargs -r -n1 env OLLAMA_HOST=ollama:11434 ollama pull || echo 'No models found'"
 services:
 
   flowise: