Skip to content

Commit 875da3d

Browse files
committed
feat(agent, llm): enhance agent response handling and tool call logic
Improved handling of the agent's chat responses, including better doom-loop detection. Added intermediate narrative emission for LLM tool calls to make execution logic more transparent. Updated tests to cover the new functionality.
1 parent 620e21b commit 875da3d

File tree

6 files changed

+124
-41
lines changed

6 files changed

+124
-41
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 45 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -2439,7 +2439,45 @@ def run_autonomous(
24392439
started_at=started_at,
24402440
)
24412441

2442-
# Check doom loop with graduated recovery (G-RECOVERY-2 fix)
2442+
2443+
# Execute one turn using the agent's chat method
2444+
# Always use the original prompt (prompt re-injection)
2445+
# Reset per-turn tool count for no-tool-call detection
2446+
self._autonomy_turn_tool_count = 0
2447+
try:
2448+
response = self.chat(prompt)
2449+
except Exception as e:
2450+
return AutonomyResult(
2451+
success=False,
2452+
output=str(e),
2453+
completion_reason="error",
2454+
iterations=iterations,
2455+
stage=stage,
2456+
actions=actions_taken,
2457+
duration_seconds=time_module.time() - start_time,
2458+
error=str(e),
2459+
started_at=started_at,
2460+
)
2461+
2462+
response_str = str(response)
2463+
2464+
# Record response text for content streaming loop detection
2465+
if self._doom_loop_tracker is not None:
2466+
self._doom_loop_tracker.record_response(response_str)
2467+
2468+
# Record the action for doom loop tracking (G1 fix: was missing)
2469+
# Use response hash only — iteration was removed because it made
2470+
# every fingerprint unique, preventing doom loop detection.
2471+
self._record_action(
2472+
"chat",
2473+
{"response_hash": hash(response_str[:500])},
2474+
response_str[:200],
2475+
True
2476+
)
2477+
2478+
# Check doom loop AFTER recording action so detector sees
2479+
# the current iteration's fingerprint (repositioned from top
2480+
# of loop for correct detection timing).
24432481
if self._is_doom_loop():
24442482
recovery = self._get_doom_recovery()
24452483

@@ -2461,12 +2499,13 @@ def run_autonomous(
24612499
prompt = prompt + "\n\n[System: Previous approach repeated. Try a completely different strategy.]"
24622500
if self._doom_loop_tracker is not None:
24632501
self._doom_loop_tracker.clear_actions()
2502+
self._consecutive_no_tool_turns = 0
24642503
continue
24652504
elif recovery == "escalate_model":
2466-
# Give the agent one more try with explicit error guidance
24672505
prompt = prompt + "\n\n[System: You are stuck in a loop. CRITICAL: Check that all tool argument names exactly match the function signature. Do NOT add '=' to argument names. Use only the documented parameter names.]"
24682506
if self._doom_loop_tracker is not None:
24692507
self._doom_loop_tracker.clear_actions()
2508+
self._consecutive_no_tool_turns = 0
24702509
continue
24712510
elif recovery == "request_help":
24722511
return AutonomyResult(
@@ -2491,41 +2530,6 @@ def run_autonomous(
24912530
started_at=started_at,
24922531
)
24932532

2494-
# Execute one turn using the agent's chat method
2495-
# Always use the original prompt (prompt re-injection)
2496-
# Reset per-turn tool count for no-tool-call detection
2497-
self._autonomy_turn_tool_count = 0
2498-
try:
2499-
response = self.chat(prompt)
2500-
except Exception as e:
2501-
return AutonomyResult(
2502-
success=False,
2503-
output=str(e),
2504-
completion_reason="error",
2505-
iterations=iterations,
2506-
stage=stage,
2507-
actions=actions_taken,
2508-
duration_seconds=time_module.time() - start_time,
2509-
error=str(e),
2510-
started_at=started_at,
2511-
)
2512-
2513-
response_str = str(response)
2514-
2515-
# Record response text for content streaming loop detection
2516-
if self._doom_loop_tracker is not None:
2517-
self._doom_loop_tracker.record_response(response_str)
2518-
2519-
# Record the action for doom loop tracking (G1 fix: was missing)
2520-
# Use iteration number + response hash to avoid false positives
2521-
# when same prompt is re-injected (G8 fix: doom loop false positive)
2522-
self._record_action(
2523-
"chat",
2524-
{"iteration": iterations, "response_hash": hash(response_str[:500])},
2525-
response_str[:200],
2526-
True
2527-
)
2528-
25292533
# Record for action history
25302534
actions_taken.append({
25312535
"iteration": iterations,
@@ -2661,7 +2665,10 @@ def run_autonomous(
26612665
# No-tool-call termination: if model makes no tool calls for 2+
26622666
# consecutive turns, treat as completion signal.
26632667
# Skip first iteration (model may be planning).
2664-
if self._autonomy_turn_tool_count == 0 and iterations > 1:
2668+
# Only applies to agents WITH tools — for tool-less agents,
2669+
# no_tool_calls is meaningless and would mask doom loop detection.
2670+
has_tools = bool(getattr(self, 'tools', None))
2671+
if self._autonomy_turn_tool_count == 0 and iterations > 1 and has_tools:
26652672
self._consecutive_no_tool_turns += 1
26662673
if self._consecutive_no_tool_turns >= 2:
26672674
execute_sync_callback('autonomy_complete',

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2273,6 +2273,16 @@ def get_response(
22732273

22742274
# Handle tool calls - Sequential tool calling logic
22752275
if tool_calls and execute_tool_fn:
2276+
# Emit intermediate LLM narrative before tool execution
2277+
# This is the text the LLM generated alongside tool calls,
2278+
# explaining its reasoning — competitors display this inline.
2279+
if response_text and response_text.strip():
2280+
_get_display_functions()['execute_sync_callback'](
2281+
'llm_content',
2282+
content=response_text.strip(),
2283+
agent_name=agent_name,
2284+
)
2285+
22762286
# Convert tool_calls to a serializable format for all providers
22772287
serializable_tool_calls = self._serialize_tool_calls(tool_calls)
22782288
# Check if this is Ollama provider
@@ -2961,6 +2971,13 @@ def get_response_stream(
29612971

29622972
# After streaming completes, handle tool calls if present
29632973
if tool_calls and execute_tool_fn:
2974+
# Emit intermediate LLM narrative (parity with non-streaming path)
2975+
if response_text and response_text.strip():
2976+
_get_display_functions()['execute_sync_callback'](
2977+
'llm_content',
2978+
content=response_text.strip(),
2979+
agent_name=agent_name,
2980+
)
29642981
# Add assistant message with tool calls to conversation
29652982
if self._is_ollama_provider():
29662983
messages.append({

src/praisonai-agents/praisonaiagents/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@
112112
# - autonomy_complete: {completion_reason, iterations, duration_seconds}
113113
# - retry: {attempt, max_attempts, error, retry_in_seconds}
114114
SUPPORTED_CALLBACK_TYPES = [
115-
'tool_call', 'interaction', 'error', 'llm_start', 'llm_end',
115+
'tool_call', 'interaction', 'error', 'llm_start', 'llm_end', 'llm_content',
116116
'autonomy_iteration', 'autonomy_stage_change', 'autonomy_doom_loop', 'autonomy_complete',
117117
'retry',
118118
]

src/praisonai-agents/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "praisonaiagents"
7-
version = "1.5.54"
7+
version = "1.5.55"
88
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
99
readme = "README.md"
1010
requires-python = ">=3.10"

src/praisonai-agents/tests/unit/test_editor_output.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -320,3 +320,62 @@ def test_get_editor_output(self):
320320
assert get_editor_output() is editor
321321

322322
disable_editor_output()
323+
324+
325+
class TestLlmContentCallback:
326+
"""Test llm_content callback for intermediate narrative display."""
327+
328+
def setup_method(self):
329+
"""Clean up callback state before each test."""
330+
from praisonaiagents.main import sync_display_callbacks
331+
from praisonaiagents.output.editor import disable_editor_output
332+
sync_display_callbacks.pop('llm_content', None)
333+
disable_editor_output()
334+
335+
def teardown_method(self):
336+
"""Restore state after each test."""
337+
from praisonaiagents.main import sync_display_callbacks
338+
from praisonaiagents.output.editor import disable_editor_output
339+
sync_display_callbacks.pop('llm_content', None)
340+
disable_editor_output()
341+
342+
def test_llm_content_in_supported_types(self):
343+
"""llm_content must be a documented supported callback type."""
344+
from praisonaiagents.main import SUPPORTED_CALLBACK_TYPES
345+
assert 'llm_content' in SUPPORTED_CALLBACK_TYPES
346+
347+
def test_enable_editor_output_registers_llm_content(self):
348+
"""enable_editor_output must register a llm_content callback."""
349+
from praisonaiagents.output.editor import enable_editor_output
350+
from praisonaiagents.main import sync_display_callbacks
351+
352+
enable_editor_output(use_color=False)
353+
assert 'llm_content' in sync_display_callbacks
354+
355+
def test_llm_content_callback_fires_narrative(self):
356+
"""Firing llm_content callback must create a NARRATIVE block."""
357+
from praisonaiagents.output.editor import enable_editor_output, get_editor_output, BlockType
358+
from praisonaiagents.main import execute_sync_callback
359+
360+
enable_editor_output(use_color=False)
361+
execute_sync_callback('llm_content', content="Let me analyze this further.")
362+
363+
editor = get_editor_output()
364+
blocks = editor.get_blocks()
365+
narrative_blocks = [b for b in blocks if b.type == BlockType.NARRATIVE]
366+
assert len(narrative_blocks) == 1
367+
assert narrative_blocks[0].content == "Let me analyze this further."
368+
369+
def test_llm_content_callback_skips_empty(self):
370+
"""Firing llm_content with empty content must not create a block."""
371+
from praisonaiagents.output.editor import enable_editor_output, get_editor_output, BlockType
372+
from praisonaiagents.main import execute_sync_callback
373+
374+
enable_editor_output(use_color=False)
375+
execute_sync_callback('llm_content', content="")
376+
execute_sync_callback('llm_content', content=" ")
377+
378+
editor = get_editor_output()
379+
blocks = editor.get_blocks()
380+
narrative_blocks = [b for b in blocks if b.type == BlockType.NARRATIVE]
381+
assert len(narrative_blocks) == 0

src/praisonai-agents/uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)