Issue with pydantic version #12

@PeteEdley-EdleyIT

Description

When I try running this image, I get the following error:

[llmapi] | 2025-08-27 18:21:36,867 - app - INFO - loaded Flask
[llmapi] | [2025-08-27 18:21:36 +0000] [3] [ERROR] Exception in worker process
[llmapi] | Traceback (most recent call last):
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/arbiter.py", line 609, in spawn_worker
[llmapi] | worker.init_process()
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/workers/gthread.py", line 95, in init_process
[llmapi] | super().init_process()
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/workers/base.py", line 134, in init_process
[llmapi] | self.load_wsgi()
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/workers/base.py", line 146, in load_wsgi
[llmapi] | self.wsgi = self.app.wsgi()
[llmapi] | ^^^^^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/app/base.py", line 67, in wsgi
[llmapi] | self.callable = self.load()
[llmapi] | ^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py", line 58, in load
[llmapi] | return self.load_wsgiapp()
[llmapi] | ^^^^^^^^^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py", line 48, in load_wsgiapp
[llmapi] | return util.import_app(self.app_uri)
[llmapi] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/site-packages/gunicorn/util.py", line 371, in import_app
[llmapi] | mod = importlib.import_module(module)
[llmapi] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/importlib/init.py", line 90, in import_module
[llmapi] | return _bootstrap._gcd_import(name[level:], package, level)
[llmapi] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[llmapi] | File "", line 1387, in _gcd_import
[llmapi] | File "", line 1360, in _find_and_load
[llmapi] | File "", line 1331, in find_and_load_unlocked
[llmapi] | File "", line 935, in load_unlocked
[llmapi] | File "", line 995, in exec_module
[llmapi] | File "", line 488, in call_with_frames_removed
[llmapi] | File "/app/app.py", line 24, in
[llmapi] | from chat import chat_with_memory,complete_code,rag_chat,rag_ingest
[llmapi] | File "/app/chat.py", line 2, in
[llmapi] | from utils import get_user_message_query
[llmapi] | File "/app/utils.py", line 2, in
[llmapi] | from llama_index.core.llms import ChatMessage
[llmapi] | File "/usr/local/lib/python3.12/site-packages/llama_index/core/init.py", line 10, in
[llmapi] | from llama_index.core.base.response.schema import Response
[llmapi] | File "/usr/local/lib/python3.12/site-packages/llama_index/core/base/response/schema.py", line 7, in
[llmapi] | from llama_index.core.async_utils import asyncio_run
[llmapi] | File "/usr/local/lib/python3.12/site-packages/llama_index/core/async_utils.py", line 7, in
[llmapi] | import llama_index.core.instrumentation as instrument
[llmapi] | File "/usr/local/lib/python3.12/site-packages/llama_index/core/instrumentation/init.py", line 5, in
[llmapi] | from llama_index.core.instrumentation.dispatcher import (
[llmapi] | File "/usr/local/lib/python3.12/site-packages/llama_index/core/instrumentation/dispatcher.py", line 26, in
[llmapi] | class Dispatcher(BaseModel):
[llmapi] | File "/usr/local/lib/python3.12/site-packages/pydantic/v1/main.py", line 286, in new
[llmapi] | cls.try_update_forward_refs()
[llmapi] | File "/usr/local/lib/python3.12/site-packages/pydantic/v1/main.py", line 807, in try_update_forward_refs
[llmapi] | update_model_forward_refs(cls, cls.fields.values(), cls.config.json_encoders, localns, (NameError,))
[llmapi] | File "/usr/local/lib/python3.12/site-packages/pydantic/v1/typing.py", line 554, in update_model_forward_refs
[llmapi] | update_field_forward_refs(f, globalns=globalns, localns=localns)
[llmapi] | File "/usr/local/lib/python3.12/site-packages/pydantic/v1/typing.py", line 520, in update_field_forward_refs
[llmapi] | field.type
= evaluate_forwardref(field.type
, globalns, localns or None)
[llmapi] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[llmapi] | File "/usr/local/lib/python3.12/site-packages/pydantic/v1/typing.py", line 66, in evaluate_forwardref
[llmapi] | return cast(Any, type_)._evaluate(globalns, localns, set())
[llmapi] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[llmapi] | TypeError: ForwardRef._evaluate() missing 1 required keyword-only argument: 'recursive_guard'
[llmapi] | [2025-08-27 18:21:36 +0000] [3] [INFO] Worker exiting (pid: 3)
[llmapi] | [2025-08-27 18:21:36 +0000] [1] [ERROR] Worker (pid:3) exited with code 3
[llmapi] | [2025-08-27 18:21:36 +0000] [1] [ERROR] Shutting down: Master
[llmapi] | [2025-08-27 18:21:36 +0000] [1] [ERROR] Reason: Worker failed to boot.
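
From the traceback, this looks like the known incompatibility between pydantic's bundled v1 code and Python 3.12.4+, where ForwardRef._evaluate() gained a keyword-only recursive_guard parameter. A minimal sketch of the failing call, based on my reading of the traceback (assumes Python >= 3.12.4 inside the image):

# CPython 3.12.4 made recursive_guard keyword-only (and added a positional
# type_params parameter before it), so the positional set() that
# pydantic/v1/typing.py passes no longer reaches recursive_guard.
from typing import ForwardRef

ref = ForwardRef("int")
ref._evaluate({}, {}, set())
# TypeError: ForwardRef._evaluate() missing 1 required keyword-only
# argument: 'recursive_guard'

If that reading is right, rebuilding the image against Python 3.12.3, or against a pydantic release whose v1 layer passes recursive_guard by keyword (1.10.17+, or a pydantic 2.x that vendors that fix), should avoid the crash.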

Here is my compose file:
version: "3.8"

services:
  # Ollama container service
  ollama:
    image: ollama/ollama
    ports:
      - "11434:11434"
    networks:
      - llm-network
    volumes:
      - ollama:/root/.ollama
    container_name: ollama-container
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  # LLM API container service for configuring RAG, remote models, and caching.
  # Can be configured to connect to Ollama running inside a container or on the host.
  llmapi:
    image: nagaraj23/local-llm-api
    ports:
      - "5000:5000"
    networks:
      - llm-network
    volumes:
      - ragdir:/ragdir
      - ragstorage:/ragstorage
    container_name: llm-api-container
    environment:
      GUNICORN_CMD_ARGS: --bind=0.0.0.0:5000 --workers=1 --threads=4
      LOG_LEVEL: INFO
      MODEL_NAME: local/gemma:2b
      CODE_MODEL_NAME: local/codegemma:2b
      EMBED_MODEL_NAME: local/nomic-embed-text
      API_KEY:
      EMBED_API_KEY:
      API_TIMEOUT: 600

      # Connect to Ollama running at host
      #OLLAMA_HOST: host.docker.internal

      # Connect to Ollama container service
      OLLAMA_HOST: ollama

      OLLAMA_PORT: 11434
      REDIS_HOST: cache
      REDIS_PORT: 6379
      REDIS_PASSWORD: redis-stack
      REDIS_EXPIRY: 3600

  # Redis cache service
  # Supports caching chat history and searching history by keyword/chat Id
  cache:
    image: redis/redis-stack:latest
    restart: always
    networks:
      - llm-network
    ports:
      - "6379:6379"
    environment:
      REDIS_ARGS: "--requirepass redis-stack"
    volumes:
      - cachedir:/data

networks:
  llm-network:

volumes:
  ollama:

  # Volume mapping on host device path to store document embeddings.
  ragstorage:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: ./llmdata/ragstorage

  # Volume mapping on host device path to store documents for Q & A.
  ragdir:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: ./llmdata/ragdir

  # Cache directory for Redis
  cachedir:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: ./llmdata/cachedir
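
For reference, the versions inside the container can be checked with a small script (a sketch; check_versions.py is a hypothetical helper, not part of the image, run e.g. via docker compose exec llmapi python):

# check_versions.py -- hypothetical helper to confirm the version
# mismatch inside the llmapi container.
import sys
import pydantic

print("Python  :", sys.version.split()[0])  # affected if >= 3.12.4
print("pydantic:", pydantic.VERSION)        # v1 code fixed as of 1.10.17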
