Skip to content

Commit 0f0b640

Browse files
committed
Release v3.10.27
1 parent d857dfc commit 0f0b640

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+5542
-287
lines changed

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.10.26" \
19+
"praisonai>=3.10.27" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=3.10.26" \
23+
"praisonai>=3.10.27" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.10.26" \
19+
"praisonai>=3.10.27" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
"""
2+
Example: Auto-save agent output to file
3+
4+
This example demonstrates how to automatically save agent responses to files.
5+
"""
6+
7+
from praisonaiagents import Agent
8+
from praisonaiagents.config import OutputConfig
9+
10+
# Method 1: Simplest - just pass a file path string
11+
agent = Agent(
12+
instructions="You are a helpful assistant",
13+
output="output/response.txt" # Just a string path!
14+
)
15+
16+
# When you call start(), the response is automatically saved
17+
# result = agent.start("Write a short greeting")
18+
# Prints: ✅ Output saved to output/response.txt
19+
20+
# Method 2: With template formatting using OutputConfig
21+
agent_with_template = Agent(
22+
instructions="You are a blog writer",
23+
output=OutputConfig(
24+
output_file="output/blog_post.md",
25+
template="""# {{title}}
26+
27+
{{content}}
28+
29+
---
30+
*Generated by AI*
31+
"""
32+
)
33+
)
34+
35+
# The agent will format its response according to the template
36+
# result = agent_with_template.start("Write about Python programming")
37+
38+
# Method 3: Combine with other output settings
39+
agent_verbose = Agent(
40+
instructions="You are a researcher",
41+
output=OutputConfig(
42+
verbose=True, # Show rich output in terminal
43+
output_file="output/research.md", # Also save to file
44+
template="## Research Summary\n\n{{content}}"
45+
)
46+
)
47+
48+
print("Examples ready! Uncomment agent.start() calls to run.")
49+
print("Output will be saved to the specified files automatically.")

src/praisonai-agents/llm-gemini-advanced.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
# Detailed LLM configuration
44
llm_config = {
5-
"model": "gemini/gemini-1.5-flash-latest", # Model name without provider prefix
5+
"model": "gemini/gemini-3-flash-preview", # Model name without provider prefix
66

77
# Core settings
88
"temperature": 0.7, # Controls randomness (like temperature)
@@ -25,7 +25,7 @@
2525

2626
# Additional controls
2727
"seed": 42, # For reproducible responses
28-
"stop_phrases": ["##", "END"], # Custom stop sequences
28+
"stop_phrases": ["---END---", "END"], # Custom stop sequences
2929
}
3030

3131
agent = Agent(

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 131 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,31 @@ def _get_stream_emitter():
9393
_stream_emitter_class = StreamEventEmitter
9494
return _stream_emitter_class
9595

96+
# File extensions that indicate a file path (for output parameter detection)
97+
_FILE_EXTENSIONS = frozenset({'.txt', '.md', '.json', '.yaml', '.yml', '.html', '.csv', '.log', '.xml', '.rst'})
98+
99+
def _is_file_path(value: str) -> bool:
100+
"""Check if a string looks like a file path (not a preset name).
101+
102+
Used to detect when output="path/to/file.txt" should be treated as
103+
output_file instead of a preset name.
104+
105+
Args:
106+
value: String to check
107+
108+
Returns:
109+
True if the string looks like a file path
110+
"""
111+
# Contains path separator
112+
if '/' in value or '\\' in value:
113+
return True
114+
# Ends with common file extension
115+
lower = value.lower()
116+
for ext in _FILE_EXTENSIONS:
117+
if lower.endswith(ext):
118+
return True
119+
return False
120+
96121
# ============================================================================
97122
# Performance: Module-level imports for param resolution (moved from __init__)
98123
# These imports are lightweight and avoid per-Agent import overhead
@@ -571,6 +596,9 @@ def __init__(
571596
preset_value = OUTPUT_PRESETS.get(output_lower)
572597
if preset_value is not None:
573598
_output_config = OutputConfig(**preset_value) if isinstance(preset_value, dict) else preset_value
599+
elif _is_file_path(output):
600+
# String looks like a file path - use as output_file
601+
_output_config = OutputConfig(output_file=output)
574602
else:
575603
_output_config = OutputConfig() # Default silent
576604
elif isinstance(output, OutputConfig):
@@ -596,6 +624,8 @@ def __init__(
596624
json_output = getattr(_output_config, 'json_output', False)
597625
status_trace = getattr(_output_config, 'status_trace', False) # New: clean inline status
598626
simple_output = getattr(_output_config, 'simple_output', False) # status preset: no timestamps
627+
output_file = getattr(_output_config, 'output_file', None) # Auto-save to file
628+
output_template = getattr(_output_config, 'template', None) # Response template
599629
else:
600630
# Fallback defaults match silent mode (zero overhead)
601631
verbose, markdown, stream, metrics, reasoning_steps = False, False, False, False, False
@@ -1353,6 +1383,10 @@ def __init__(
13531383

13541384
# Action trace mode - handled via display callbacks, not separate emitter
13551385
self._actions_trace = actions_trace
1386+
1387+
# Output file and template - for auto-saving response to file
1388+
self._output_file = output_file if _output_config else None
1389+
self._output_template = output_template if _output_config else None
13561390

13571391
# Telemetry - lazy initialized via property for performance
13581392
self.__telemetry = None
@@ -3835,6 +3869,7 @@ def _build_multimodal_prompt(
38353869
"type": "image_url",
38363870
"image_url": {"url": f"data:{media_type};base64,{data}"}
38373871
})
3872+
logging.debug(f"Successfully encoded image attachment: {attachment} ({len(data)} bytes base64)")
38383873
except Exception as e:
38393874
logging.warning(f"Failed to load attachment {attachment}: {e}")
38403875
elif attachment.startswith(('http://', 'https://', 'data:')):
@@ -4393,7 +4428,7 @@ def session_id(self) -> Optional[str]:
43934428
"""Get the current session ID."""
43944429
return self._session_id
43954430

4396-
def chat(self, prompt, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None, task_name=None, task_description=None, task_id=None, config=None, force_retrieval=False, skip_retrieval=False, attachments=None):
4431+
def chat(self, prompt, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None, task_name=None, task_description=None, task_id=None, config=None, force_retrieval=False, skip_retrieval=False, attachments=None, tool_choice=None):
43974432
"""
43984433
Chat with the agent.
43994434
@@ -4402,6 +4437,8 @@ def chat(self, prompt, temperature=1.0, tools=None, output_json=None, output_pyd
44024437
attachments: Optional list of image/file paths that are ephemeral
44034438
(used for THIS turn only, NEVER stored in history).
44044439
Supports: file paths, URLs, or data URIs.
4440+
tool_choice: Optional tool choice mode ('auto', 'required', 'none').
4441+
'required' forces the LLM to call a tool before responding.
44054442
...other args...
44064443
"""
44074444
# Emit context trace event (zero overhead when not set)
@@ -4410,11 +4447,11 @@ def chat(self, prompt, temperature=1.0, tools=None, output_json=None, output_pyd
44104447
_trace_emitter.agent_start(self.name, {"role": self.role, "goal": self.goal})
44114448

44124449
try:
4413-
return self._chat_impl(prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter)
4450+
return self._chat_impl(prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice)
44144451
finally:
44154452
_trace_emitter.agent_end(self.name)
44164453

4417-
def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter):
4454+
def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice=None):
44184455
"""Internal chat implementation (extracted for trace wrapping)."""
44194456
# Apply rate limiter if configured (before any LLM call)
44204457
if self._rate_limiter is not None:
@@ -4424,6 +4461,20 @@ def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, r
44244461
# IMPORTANT: Original text 'prompt' is stored in history, attachments are NOT
44254462
llm_prompt = self._build_multimodal_prompt(prompt, attachments) if attachments else prompt
44264463

4464+
# Apply response template if configured (DRY: TemplateConfig.response is canonical,
4465+
# OutputConfig.template is fallback for backward compatibility)
4466+
effective_template = self.response_template or self._output_template
4467+
if effective_template:
4468+
template_instruction = f"\n\nIMPORTANT: Format your response according to this template:\n{effective_template}"
4469+
if isinstance(llm_prompt, str):
4470+
llm_prompt = llm_prompt + template_instruction
4471+
elif isinstance(llm_prompt, list):
4472+
# For multimodal prompts, append to the last text content
4473+
for i in range(len(llm_prompt) - 1, -1, -1):
4474+
if isinstance(llm_prompt[i], dict) and llm_prompt[i].get('type') == 'text':
4475+
llm_prompt[i]['text'] = llm_prompt[i]['text'] + template_instruction
4476+
break
4477+
44274478
# Initialize DB session on first chat (lazy)
44284479
self._init_db_session()
44294480

@@ -4591,30 +4642,40 @@ def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, r
45914642
)
45924643

45934644
# Pass everything to LLM class
4594-
response_text = self.llm_instance.get_response(
4595-
prompt=prompt,
4596-
system_prompt=system_prompt_for_llm,
4597-
chat_history=processed_history,
4598-
temperature=temperature,
4599-
tools=tool_param,
4600-
output_json=output_json,
4601-
output_pydantic=output_pydantic,
4602-
verbose=self.verbose,
4603-
markdown=self.markdown,
4604-
reflection=self.self_reflect,
4605-
max_reflect=self.max_reflect,
4606-
min_reflect=self.min_reflect,
4607-
console=self.console,
4608-
agent_name=self.name,
4609-
agent_role=self.role,
4610-
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
4611-
task_name=task_name,
4612-
task_description=task_description,
4613-
task_id=task_id,
4614-
execute_tool_fn=self.execute_tool, # Pass tool execution function
4615-
reasoning_steps=reasoning_steps,
4616-
stream=stream # Pass the stream parameter from chat method
4645+
# Use llm_prompt (which includes multimodal content if attachments present)
4646+
# Build LLM call kwargs
4647+
llm_kwargs = dict(
4648+
prompt=llm_prompt,
4649+
system_prompt=system_prompt_for_llm,
4650+
chat_history=processed_history,
4651+
temperature=temperature,
4652+
tools=tool_param,
4653+
output_json=output_json,
4654+
output_pydantic=output_pydantic,
4655+
verbose=self.verbose,
4656+
markdown=self.markdown,
4657+
reflection=self.self_reflect,
4658+
max_reflect=self.max_reflect,
4659+
min_reflect=self.min_reflect,
4660+
console=self.console,
4661+
agent_name=self.name,
4662+
agent_role=self.role,
4663+
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
4664+
task_name=task_name,
4665+
task_description=task_description,
4666+
task_id=task_id,
4667+
execute_tool_fn=self.execute_tool,
4668+
reasoning_steps=reasoning_steps,
4669+
stream=stream
46174670
)
4671+
4672+
# Pass tool_choice if specified (auto, required, none)
4673+
# Also check for YAML-configured tool_choice on the agent
4674+
effective_tool_choice = tool_choice or getattr(self, '_yaml_tool_choice', None)
4675+
if effective_tool_choice:
4676+
llm_kwargs['tool_choice'] = effective_tool_choice
4677+
4678+
response_text = self.llm_instance.get_response(**llm_kwargs)
46184679

46194680
self.chat_history.append({"role": "assistant", "content": response_text})
46204681
# Persist assistant message to DB
@@ -4658,8 +4719,9 @@ def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, r
46584719
logging.debug(f"Agent {self.name} using native structured output with response_format")
46594720

46604721
# Use the new _build_messages helper method
4722+
# Pass llm_prompt (which includes multimodal content if attachments present)
46614723
messages, original_prompt = self._build_messages(
4662-
prompt, temperature, output_json, output_pydantic,
4724+
llm_prompt, temperature, output_json, output_pydantic,
46634725
use_native_format=use_native_format
46644726
)
46654727

@@ -5816,6 +5878,10 @@ def run_chat():
58165878
# Auto-save session if enabled
58175879
self._auto_save_session()
58185880

5881+
# Auto-save output to file if configured
5882+
if result and self._output_file:
5883+
self._save_output_to_file(str(result))
5884+
58195885
return result
58205886
finally:
58215887
# Restore original output settings
@@ -5894,6 +5960,44 @@ def _auto_save_session(self):
58945960
except Exception as e:
58955961
logging.debug(f"Error auto-saving session: {e}")
58965962

5963+
def _save_output_to_file(self, content: str) -> bool:
5964+
"""Save agent output to file if output_file is configured.
5965+
5966+
Args:
5967+
content: The response content to save
5968+
5969+
Returns:
5970+
True if file was saved, False otherwise
5971+
"""
5972+
if not self._output_file:
5973+
return False
5974+
5975+
try:
5976+
import os
5977+
5978+
# Expand user home directory and resolve path
5979+
file_path = os.path.expanduser(self._output_file)
5980+
file_path = os.path.abspath(file_path)
5981+
5982+
# Create parent directories if they don't exist
5983+
parent_dir = os.path.dirname(file_path)
5984+
if parent_dir and not os.path.exists(parent_dir):
5985+
os.makedirs(parent_dir, exist_ok=True)
5986+
5987+
# Write content to file
5988+
with open(file_path, 'w', encoding='utf-8') as f:
5989+
f.write(str(content))
5990+
5991+
# Print success message to terminal
5992+
print(f"✅ Output saved to {file_path}")
5993+
logging.debug(f"Output saved to file: {file_path}")
5994+
return True
5995+
5996+
except Exception as e:
5997+
logging.warning(f"Failed to save output to file '{self._output_file}': {e}")
5998+
print(f"⚠️ Failed to save output to {self._output_file}: {e}")
5999+
return False
6000+
58976001
def _start_stream(self, prompt: str, **kwargs) -> Generator[str, None, None]:
58986002
"""Stream generator for real-time response chunks."""
58996003
try:

src/praisonai-agents/praisonaiagents/config/feature_configs.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -557,6 +557,15 @@ class OutputConfig:
557557
# Shows: [timestamp] Calling LLM..., Executing tool..., Response: ...
558558
status_trace: bool = False
559559

560+
# Output file - save agent response to file automatically
561+
# When set, saves the response to the specified file path
562+
output_file: Optional[str] = None
563+
564+
# Output template - format template for the response
565+
# Agent will be instructed to follow this template when generating response
566+
# Example: "# {{title}}\n\n{{content}}\n\n---\nGenerated by AI"
567+
template: Optional[str] = None
568+
560569
def to_dict(self) -> Dict[str, Any]:
561570
"""Convert to dictionary."""
562571
return {
@@ -570,6 +579,8 @@ def to_dict(self) -> Dict[str, Any]:
570579
"simple_output": self.simple_output,
571580
"show_parameters": self.show_parameters,
572581
"status_trace": self.status_trace,
582+
"output_file": self.output_file,
583+
"template": self.template,
573584
}
574585

575586

src/praisonai-agents/praisonaiagents/context/manager.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -844,8 +844,9 @@ def capture_llm_boundary(
844844
message_hash = hashlib.sha256(messages_json.encode()).hexdigest()[:16]
845845
tools_hash = hashlib.sha256(tools_json.encode()).hexdigest()[:16]
846846

847+
from datetime import timezone
847848
hook_data = SnapshotHookData(
848-
timestamp=datetime.utcnow().isoformat() + "Z",
849+
timestamp=datetime.now(tz=timezone.utc).isoformat().replace('+00:00', 'Z'),
849850
messages=messages,
850851
tools=tools,
851852
message_hash=message_hash,
@@ -998,8 +999,9 @@ def _add_history_event(
998999
details: Optional[Dict[str, Any]] = None,
9991000
) -> None:
10001001
"""Add an event to optimization history."""
1002+
from datetime import timezone
10011003
event = OptimizationEvent(
1002-
timestamp=datetime.utcnow().isoformat() + "Z",
1004+
timestamp=datetime.now(tz=timezone.utc).isoformat().replace('+00:00', 'Z'),
10031005
event_type=event_type,
10041006
strategy=strategy,
10051007
tokens_before=tokens_before,

0 commit comments

Comments (0)