Skip to content

Commit 44b800d

Browse files
committed
Release v3.11.14
1 parent 6e3a99b commit 44b800d

File tree

16 files changed

+241
-72
lines changed

16 files changed

+241
-72
lines changed

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.11.13" \
19+
"praisonai>=3.11.14" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=3.11.13" \
23+
"praisonai>=3.11.14" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.11.13" \
19+
"praisonai>=3.11.14" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

src/praisonai-agents/AGENTS.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -372,6 +372,9 @@ class MyTool(BaseTool):
372372
- `praisonaiagents/tools/decorator.py` - @tool decorator
373373
- `praisonaiagents/tools/registry.py` - Tool registry
374374

375+
> [!IMPORTANT]
376+
> **Agents only recognize parameters.** When creating tools, always expose all options as function parameters—agents cannot discover env vars or config files.
377+
375378
### 6.2 Hooks & Middleware
376379

377380
```python

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 67 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -799,15 +799,26 @@ def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text:
799799
# For Ollama, always generate summary when we have tool results
800800
# This prevents infinite loops caused by empty/minimal responses
801801

802-
# Filter out error results first
802+
# Filter out error results and collect error messages
803803
valid_results = []
804+
error_messages = []
804805
for result in tool_results:
805-
# Skip error responses
806+
# Check for error responses
806807
if isinstance(result, dict) and 'error' in result:
808+
error_messages.append(result.get('error', 'Unknown error'))
807809
continue
810+
if isinstance(result, list) and len(result) > 0:
811+
first_item = result[0]
812+
if isinstance(first_item, dict) and 'error' in first_item:
813+
error_messages.append(first_item.get('error', 'Unknown error'))
814+
continue
808815
valid_results.append(result)
809816

810-
# If no valid results, return None to continue
817+
# If no valid results but we have errors, return a friendly error message
818+
if not valid_results and error_messages:
819+
return "I'm sorry, but I wasn't able to complete the search. The service encountered some issues. Please try again later or try a different search query."
820+
821+
# If no valid results at all, return None to let the loop continue
811822
if not valid_results:
812823
return None
813824

@@ -845,16 +856,43 @@ def _format_ollama_tool_result_message(self, function_name: str, tool_result: An
845856
"""
846857
Format tool result message for Ollama provider.
847858
Enhanced to instruct model to use the result for final answer.
859+
Handles error results with specific instructions to provide user-friendly responses.
848860
"""
849-
tool_result_str = str(tool_result)
850-
return {
851-
"role": "user",
852-
"content": f"""Tool execution complete.
861+
# Check if the result is an error
862+
is_error = False
863+
error_message = None
864+
865+
if isinstance(tool_result, dict) and 'error' in tool_result:
866+
is_error = True
867+
error_message = tool_result.get('error', 'Unknown error')
868+
elif isinstance(tool_result, list) and len(tool_result) > 0:
869+
first_item = tool_result[0]
870+
if isinstance(first_item, dict) and 'error' in first_item:
871+
is_error = True
872+
error_message = first_item.get('error', 'Unknown error')
873+
874+
if is_error:
875+
# For errors, provide clear instructions to give a helpful response
876+
return {
877+
"role": "user",
878+
"content": f"""The tool "{function_name}" encountered an error:
879+
{error_message}
880+
881+
Please provide a helpful response to the user explaining that the operation could not be completed.
882+
Be apologetic and suggest alternatives if possible. Do NOT repeat the raw error message.
883+
Give a natural, conversational response."""
884+
}
885+
else:
886+
# For successful results, format normally
887+
tool_result_str = str(tool_result)
888+
return {
889+
"role": "user",
890+
"content": f"""Tool execution complete.
853891
Function: {function_name}
854892
Result: {tool_result_str}
855893
856-
Now provide your final answer using this result."""
857-
}
894+
Now provide your final answer using this result. Summarize the information naturally for the user."""
895+
}
858896

859897
def _get_tool_names_for_prompt(self, formatted_tools: Optional[List]) -> str:
860898
"""Extract tool names from formatted tools for prompts."""
@@ -2254,10 +2292,19 @@ def get_response(
22542292
messages.append(self._format_ollama_tool_result_message(function_name, tool_result))
22552293
else:
22562294
# For other providers, use tool role with tool_call_id
2295+
# Format error results more clearly
2296+
if tool_result is None:
2297+
content = "Function returned an empty output"
2298+
elif isinstance(tool_result, dict) and 'error' in tool_result:
2299+
content = f"Error: {tool_result.get('error', 'Unknown error')}. Please inform the user that the operation could not be completed."
2300+
elif isinstance(tool_result, list) and len(tool_result) > 0 and isinstance(tool_result[0], dict) and 'error' in tool_result[0]:
2301+
content = f"Error: {tool_result[0].get('error', 'Unknown error')}. Please inform the user that the operation could not be completed."
2302+
else:
2303+
content = json.dumps(tool_result)
22572304
messages.append({
22582305
"role": "tool",
22592306
"tool_call_id": tool_call_id,
2260-
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
2307+
"content": content
22612308
})
22622309

22632310
# Check if we should continue (for tools like sequential thinking)
@@ -3281,10 +3328,19 @@ async def get_response_async(
32813328
messages.append(self._format_ollama_tool_result_message(function_name, tool_result))
32823329
else:
32833330
# For other providers, use tool role with tool_call_id
3331+
# Format error results more clearly
3332+
if tool_result is None:
3333+
content = "Function returned an empty output"
3334+
elif isinstance(tool_result, dict) and 'error' in tool_result:
3335+
content = f"Error: {tool_result.get('error', 'Unknown error')}. Please inform the user that the operation could not be completed."
3336+
elif isinstance(tool_result, list) and len(tool_result) > 0 and isinstance(tool_result[0], dict) and 'error' in tool_result[0]:
3337+
content = f"Error: {tool_result[0].get('error', 'Unknown error')}. Please inform the user that the operation could not be completed."
3338+
else:
3339+
content = json.dumps(tool_result)
32843340
messages.append({
32853341
"role": "tool",
32863342
"tool_call_id": tool_call_id,
3287-
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
3343+
"content": content
32883344
})
32893345

32903346
# For Ollama, add explicit prompt if we need a final answer

0 commit comments

Comments (0)