Skip to content

Commit 4d262fc

Browse files
committed
Release v4.3.1
1 parent c31c5b9 commit 4d262fc

File tree

32 files changed

+3443
-20
lines changed

32 files changed

+3443
-20
lines changed

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.3.0" \
19+
"praisonai>=4.3.1" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=4.3.0" \
23+
"praisonai>=4.3.1" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.3.0" \
19+
"praisonai>=4.3.1" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
"""
2+
Streaming with Reasoning Content Example
3+
4+
This example demonstrates how to handle streaming events with reasoning content
5+
from models like o1, o1-mini, and deepseek-reasoner.
6+
7+
The is_reasoning flag allows you to distinguish between:
8+
- Reasoning/thinking content (is_reasoning=True)
9+
- Final response content (is_reasoning=False)
10+
"""
11+
12+
from praisonaiagents import Agent
13+
from praisonaiagents.streaming import StreamEvent, StreamEventType, StreamMetrics
14+
15+
16+
def create_styled_callback():
    """Build a stream callback that renders reasoning text in dim gray.

    Reasoning deltas (``is_reasoning=True``) are wrapped in the ANSI dim-gray
    escape sequence; ordinary response deltas print unstyled. A trailing
    newline is emitted when the stream ends.

    Returns:
        A callback suitable for ``agent.stream_emitter.add_callback``.
    """

    def callback(event: StreamEvent):
        if event.type == StreamEventType.STREAM_END:
            print()  # Newline at end
            return
        if event.type != StreamEventType.DELTA_TEXT:
            return
        if event.is_reasoning:
            # Dim gray for reasoning content
            print(f"\033[90m{event.content}\033[0m", end="", flush=True)
        else:
            # Normal for response content
            print(event.content, end="", flush=True)

    return callback
31+
32+
33+
def create_collecting_callback():
    """Build a callback that accumulates reasoning and response text separately.

    Returns:
        A ``(callback, get_results)`` pair. Feed stream events to ``callback``;
        ``get_results`` returns ``{"reasoning": str, "response": str}`` with
        the concatenated content collected so far.
    """
    buckets = {"reasoning": [], "response": []}

    def callback(event: StreamEvent):
        if event.type == StreamEventType.DELTA_TEXT:
            # Route each delta into the matching bucket.
            target = "reasoning" if event.is_reasoning else "response"
            buckets[target].append(event.content)

    def get_results():
        return {name: "".join(parts) for name, parts in buckets.items()}

    return callback, get_results
52+
53+
54+
def example_basic_streaming():
    """Basic streaming with reasoning detection."""
    banner = "=" * 60
    print(banner)
    print("Example 1: Basic Streaming with Reasoning Detection")
    print(banner)

    agent = Agent(
        name="thinker",
        instructions="Think step by step before answering.",
        llm="gpt-4o-mini"  # Change to o1-mini for actual reasoning
    )

    # Style reasoning deltas differently from response deltas,
    # and collect timing/token metrics while streaming.
    agent.stream_emitter.add_callback(create_styled_callback())
    agent.stream_emitter.enable_metrics()

    response = agent.start("What is 15 * 23?", stream=True)

    # Report collected metrics, if any were recorded.
    metrics = agent.stream_emitter.get_metrics()
    if metrics:
        print(f"\n📊 {metrics.format_summary()}")

    return response
81+
82+
83+
def example_collect_separately():
    """Collect reasoning and response content separately."""
    separator = "=" * 60
    print("\n" + separator)
    print("Example 2: Collect Reasoning and Response Separately")
    print(separator)

    agent = Agent(
        name="analyzer",
        instructions="Analyze the problem carefully.",
        llm="gpt-4o-mini"
    )

    # Route streamed deltas into separate reasoning/response buffers.
    collector, fetch_results = create_collecting_callback()
    agent.stream_emitter.add_callback(collector)

    response = agent.start("Explain why the sky is blue.", stream=True)

    # Summarize how much of each content type was streamed.
    results = fetch_results()
    print(f"\n📝 Reasoning length: {len(results['reasoning'])} chars")
    print(f"📝 Response length: {len(results['response'])} chars")

    return response
109+
110+
111+
def example_with_metrics():
    """Track streaming performance metrics."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 3: Streaming with Performance Metrics")
    print(divider)

    metrics = StreamMetrics()

    def metrics_callback(event: StreamEvent):
        # Every event feeds the metrics accumulator before display.
        metrics.update_from_event(event)

        if event.type == StreamEventType.FIRST_TOKEN:
            print("⚡ First token received!")
        elif event.type == StreamEventType.DELTA_TEXT:
            print(event.content, end="", flush=True)
        elif event.type == StreamEventType.STREAM_END:
            print()

    agent = Agent(name="assistant", llm="gpt-4o-mini")
    agent.stream_emitter.add_callback(metrics_callback)

    response = agent.start("Write a haiku about coding.", stream=True)

    print("\n📊 Performance Metrics:")
    print(f" TTFT: {metrics.ttft * 1000:.0f}ms")
    print(f" Stream Duration: {metrics.stream_duration * 1000:.0f}ms")
    print(f" Total Time: {metrics.total_time * 1000:.0f}ms")
    print(f" Tokens: {metrics.token_count}")
    print(f" Speed: {metrics.tokens_per_second:.1f} tokens/sec")

    return response
142+
143+
144+
def example_multi_agent_context():
    """Show agent context in multi-agent scenarios."""
    rule = "=" * 60
    print("\n" + rule)
    print("Example 4: Multi-Agent Context")
    print(rule)

    def context_callback(event: StreamEvent):
        if event.type == StreamEventType.DELTA_TEXT:
            # Tag each delta with the emitting agent's id when available.
            prefix = f"[{event.agent_id}] " if event.agent_id else ""
            print(f"{prefix}{event.content}", end="", flush=True)
        elif event.type == StreamEventType.STREAM_END:
            print()

    # Note: agent_id is populated when using multi-agent workflows
    agent = Agent(name="helper", llm="gpt-4o-mini")
    agent.stream_emitter.add_callback(context_callback)

    return agent.start("Say hello!", stream=True)
164+
165+
166+
if __name__ == "__main__":
    print("🚀 PraisonAI Streaming with Reasoning Examples\n")

    # Run each example in sequence.
    for demo in (
        example_basic_streaming,
        example_collect_separately,
        example_with_metrics,
        example_multi_agent_context,
    ):
        demo()

    print("\n✅ All examples completed!")

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 130 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@ def _is_file_path(value: str) -> bool:
150150
from ..task.task import Task
151151
from .handoff import Handoff, HandoffConfig, HandoffResult
152152
from ..rag.models import RAGResult, ContextPack
153+
from ..eval.results import EvaluationLoopResult
153154

154155
class Agent:
155156
# Class-level counter for generating unique display names for nameless agents
@@ -851,6 +852,24 @@ def __init__(
851852
elif memory is False:
852853
memory = None
853854

855+
# ─────────────────────────────────────────────────────────────────────
856+
# Resolve HISTORY from MemoryConfig - FAST PATH
857+
# History is enabled via memory="history" preset or MemoryConfig(history=True)
858+
# ─────────────────────────────────────────────────────────────────────
859+
_history_enabled = False
860+
_history_limit = 10
861+
_history_session_id = None
862+
863+
# Check if memory config has history settings
864+
if _memory_config is not None and isinstance(_memory_config, MemoryConfig):
865+
if _memory_config.history:
866+
_history_enabled = True
867+
_history_limit = _memory_config.history_limit
868+
869+
# Use auto_save session if no explicit session and history is enabled
870+
if _history_enabled and _history_session_id is None and auto_save:
871+
_history_session_id = auto_save
872+
854873
# ─────────────────────────────────────────────────────────────────────
855874
# Resolve KNOWLEDGE param - FAST PATH
856875
# ─────────────────────────────────────────────────────────────────────
@@ -1353,6 +1372,11 @@ def __init__(
13531372
# Used when session_id is provided but no DB adapter
13541373
self._session_store = None
13551374
self._session_store_initialized = False
1375+
1376+
# History injection settings (for auto-injecting session history into context)
1377+
self._history_enabled = _history_enabled
1378+
self._history_limit = _history_limit
1379+
self._history_session_id = _history_session_id
13561380

13571381
# Agent-centric feature instances (lazy loaded for zero performance impact)
13581382
self._auto_memory = auto_memory
@@ -2298,6 +2322,99 @@ def handoff_to(
22982322
)
22992323
return handoff_obj.execute_programmatic(self, prompt, context)
23002324

2325+
def run_until(
    self,
    prompt: str,
    criteria: str,
    threshold: float = 8.0,
    max_iterations: int = 5,
    mode: str = "optimize",
    on_iteration: Optional[Callable[[Any], None]] = None,
    verbose: bool = False,
) -> "EvaluationLoopResult":
    """
    Iterate the agent on *prompt* until its output meets *criteria*.

    Implements the "Ralph Loop" pattern: run the agent, have a Judge score
    the output against *criteria*, feed the critique back in, and repeat
    until the score reaches *threshold* or *max_iterations* is exhausted.

    Args:
        prompt: The prompt to send to the agent
        criteria: Evaluation criteria for the Judge (e.g., "Response is thorough")
        threshold: Score threshold for success (default: 8.0, scale 1-10)
        max_iterations: Maximum iterations before stopping (default: 5)
        mode: "optimize" (stop on success) or "review" (run all iterations)
        on_iteration: Optional callback called after each iteration
        verbose: Enable verbose logging

    Returns:
        EvaluationLoopResult with iteration history and final score

    Example:
        ```python
        agent = Agent(name="analyzer", instructions="Analyze systems")
        result = agent.run_until(
            "Analyze the auth flow",
            criteria="Analysis is thorough and actionable",
            threshold=8.0,
        )
        print(result.final_score)  # 8.5
        print(result.success)      # True
        ```
    """
    # Lazy import keeps the eval subsystem off the agent's import path.
    from ..eval.loop import EvaluationLoop

    loop_settings = dict(
        agent=self,
        criteria=criteria,
        threshold=threshold,
        max_iterations=max_iterations,
        mode=mode,
        on_iteration=on_iteration,
        verbose=verbose,
    )
    return EvaluationLoop(**loop_settings).run(prompt)
2377+
2378+
async def run_until_async(
    self,
    prompt: str,
    criteria: str,
    threshold: float = 8.0,
    max_iterations: int = 5,
    mode: str = "optimize",
    on_iteration: Optional[Callable[[Any], None]] = None,
    verbose: bool = False,
) -> "EvaluationLoopResult":
    """
    Async version of run_until().

    Iterate the agent on *prompt* until its output meets *criteria*,
    awaiting each agent run and Judge evaluation.

    Args:
        prompt: The prompt to send to the agent
        criteria: Evaluation criteria for the Judge
        threshold: Score threshold for success (default: 8.0)
        max_iterations: Maximum iterations before stopping (default: 5)
        mode: "optimize" (stop on success) or "review" (run all iterations)
        on_iteration: Optional callback called after each iteration
        verbose: Enable verbose logging

    Returns:
        EvaluationLoopResult with iteration history and final score
    """
    # Lazy import keeps the eval subsystem off the agent's import path.
    from ..eval.loop import EvaluationLoop

    loop_settings = dict(
        agent=self,
        criteria=criteria,
        threshold=threshold,
        max_iterations=max_iterations,
        mode=mode,
        on_iteration=on_iteration,
        verbose=verbose,
    )
    return await EvaluationLoop(**loop_settings).run_async(prompt)
2417+
23012418
async def handoff_to_async(
23022419
self,
23032420
target_agent: 'Agent',
@@ -3442,7 +3559,19 @@ def _build_messages(self, prompt, temperature=1.0, output_json=None, output_pyda
34423559
if system_prompt:
34433560
messages.append({"role": "system", "content": system_prompt})
34443561

3445-
# Add chat history
3562+
# Inject session history if enabled (from persistent storage)
3563+
if self._history_enabled and self._session_store is not None:
3564+
try:
3565+
session_history = self._session_store.get_chat_history(
3566+
self._history_session_id,
3567+
max_messages=self._history_limit
3568+
)
3569+
if session_history:
3570+
messages.extend(session_history)
3571+
except Exception as e:
3572+
logging.debug(f"Failed to load session history: {e}")
3573+
3574+
# Add in-memory chat history (current conversation)
34463575
if self.chat_history:
34473576
messages.extend(self.chat_history)
34483577

src/praisonai-agents/praisonaiagents/config/feature_configs.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,10 @@ class MemoryConfig:
165165
# Continuous learning configuration
166166
learn: Optional[Union[bool, LearnConfig]] = None
167167

168+
# History injection (auto-inject session history into context)
169+
history: bool = False
170+
history_limit: int = 10
171+
168172
def to_dict(self) -> Dict[str, Any]:
169173
"""Convert to dictionary."""
170174
learn_dict = None
@@ -181,6 +185,8 @@ def to_dict(self) -> Dict[str, Any]:
181185
"claude_memory": self.claude_memory,
182186
"config": self.config,
183187
"learn": learn_dict,
188+
"history": self.history,
189+
"history_limit": self.history_limit,
184190
}
185191

186192

src/praisonai-agents/praisonaiagents/config/presets.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,10 @@
2626
"mongodb": {"backend": "mongodb"},
2727
# Feature presets
2828
"learn": {"backend": "file", "learn": True}, # Enable Agent Learn with file backend
29+
# History presets (auto-inject session history into context)
30+
"history": {"backend": "file", "history": True, "history_limit": 10},
31+
"session": {"backend": "file", "history": True, "history_limit": 10}, # Alias
32+
"chat": {"backend": "file", "history": True, "history_limit": 20}, # Conversational
2933
}
3034

3135
MEMORY_URL_SCHEMES: Dict[str, str] = {

0 commit comments

Comments
 (0)