Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
"java/instrumentation/openinference-instrumentation-langchain4j": "0.1.8",
"java/instrumentation/openinference-instrumentation-springAI": "0.1.9",
"java/instrumentation/openinference-instrumentation-annotation": "0.1.2",
"python/instrumentation/openinference-instrumentation-pipecat": "0.1.3",
"python/instrumentation/openinference-instrumentation-pipecat": "0.1.4",
"python/instrumentation/openinference-instrumentation-agentspec": "0.1.0",
"python/instrumentation/openinference-instrumentation-strands-agents": "0.1.2",
"python/instrumentation/openinference-instrumentation-agent-framework": "0.1.4",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -818,10 +818,16 @@ def extract_from_service(self, service: FrameProcessor) -> Dict[str, Any]:

# Extract LLM settings/configuration as metadata
if hasattr(service, "_settings"):
if isinstance(service._settings, dict):
results[SpanAttributes.METADATA] = safe_json_dumps(service._settings)
else:
results[SpanAttributes.METADATA] = str(service._settings)
settings = service._settings
# Normalize non-dict settings into a plain dict first, so the metadata
# attribute is always valid JSON instead of a str() repr of the object.
if not isinstance(settings, dict):
# Convert Pydantic models, dataclasses, or other objects to dicts
# Pydantic v2 models expose model_dump(); prefer it when present.
if hasattr(settings, "model_dump") and callable(settings.model_dump):
settings = settings.model_dump()
# Pydantic v1 (and look-alike objects) expose .dict() instead.
elif hasattr(settings, "dict") and callable(settings.dict):
settings = settings.dict()
# Fallback: shallow __dict__ snapshot (covers plain objects/dataclasses).
# NOTE(review): nested non-JSON values are left for safe_json_dumps to handle.
elif hasattr(settings, "__dict__"):
settings = settings.__dict__
results[SpanAttributes.METADATA] = safe_json_dumps(settings)
Comment thread
duncankmckinnon marked this conversation as resolved.
Comment thread
duncankmckinnon marked this conversation as resolved.

return results

Expand Down
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.1.3"
__version__ = "0.1.4"
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
"""Tests for attribute extraction from Pipecat frames and services."""

import json
from dataclasses import dataclass
from unittest.mock import Mock

from pipecat.frames.frames import (
Expand Down Expand Up @@ -379,3 +381,77 @@ def failing_extractor() -> None:

result = safe_extract(failing_extractor)
assert result is None


class TestLLMSettingsMetadataSerialization:
    """Non-dict LLM settings objects must come out as valid JSON metadata."""

    def _make_llm_service_with_settings(self, settings: object) -> Mock:
        """Return a mock LLM service whose ``_settings`` is *settings*."""
        from pipecat.services.llm_service import LLMService

        class TestLLMService(LLMService):
            pass

        mock_service = Mock(spec=TestLLMService)
        mock_service.__class__ = TestLLMService
        # Make the service look like it comes from the Google provider module.
        mock_service.__class__.__module__ = "pipecat.services.google"
        mock_service.model_name = "gemini-3-flash-preview"
        mock_service._settings = settings
        return mock_service

    def test_pydantic_v2_model_dump(self) -> None:
        """A Pydantic v2 model is converted through model_dump()."""
        from pydantic import BaseModel

        class GoogleLLMSettings(BaseModel):
            model: str = "gemini-3-flash-preview"
            temperature: float = 0.7
            max_tokens: int = 4096

        attrs = extract_service_attributes(
            self._make_llm_service_with_settings(GoogleLLMSettings())
        )

        decoded = json.loads(attrs[SpanAttributes.METADATA])
        assert decoded["model"] == "gemini-3-flash-preview"
        assert decoded["temperature"] == 0.7
        assert decoded["max_tokens"] == 4096

    def test_object_with_dict_method(self) -> None:
        """An object exposing .dict() is serialized through that method."""

        class LegacySettings:
            def __init__(self) -> None:
                self.model = "legacy-model"
                self.temperature = 0.5

            def dict(self) -> dict:
                return {"model": self.model, "temperature": self.temperature}

        attrs = extract_service_attributes(
            self._make_llm_service_with_settings(LegacySettings())
        )

        decoded = json.loads(attrs[SpanAttributes.METADATA])
        assert decoded["model"] == "legacy-model"
        assert decoded["temperature"] == 0.5

    def test_dataclass_settings(self) -> None:
        """A dataclass instance is serialized via its __dict__."""

        @dataclass
        class DataclassSettings:
            model: str = "dataclass-model"
            max_tokens: int = 1024

        attrs = extract_service_attributes(
            self._make_llm_service_with_settings(DataclassSettings())
        )

        decoded = json.loads(attrs[SpanAttributes.METADATA])
        assert decoded["model"] == "dataclass-model"
        assert decoded["max_tokens"] == 1024
Loading