Skip to content

Commit a293999 — Release v4.5.18 (parent: 0b14ae4)

File tree

13 files changed: +130 additions, −63 deletions

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.5.17" \
19+
"praisonai>=4.5.18" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=4.5.17" \
23+
"praisonai>=4.5.18" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.5.17" \
19+
"praisonai>=4.5.18" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

src/praisonai-agents/praisonaiagents/output/status.py

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -80,13 +80,15 @@ def __init__(
8080
use_color: bool = True,
8181
show_timestamps: bool = True, # NEW: control timestamp display
8282
show_metrics: bool = False, # Enable metrics display for debug mode
83+
show_tool_output: bool = True, # Show tool result output inline
8384
):
8485
self._file = file or sys.stderr # Use stderr to not interfere with agent output
8586
self._format = format
8687
self._redact = redact
8788
self._use_color = use_color
8889
self._show_timestamps = show_timestamps
8990
self._show_metrics = show_metrics
91+
self._show_tool_output = show_tool_output
9092
self._console = None
9193
self._tool_start_times: Dict[str, float] = {}
9294
self._lock = threading.Lock() # Per-sink lock for thread safety
@@ -120,12 +122,12 @@ def _format_args(self, args: Optional[Dict[str, Any]]) -> str:
120122
parts = []
121123
for k, v in (redacted or {}).items():
122124
if isinstance(v, str):
123-
v_str = f'"{_truncate(v, 30)}"'
125+
v_str = f'"{_truncate(v, 80)}"'
124126
else:
125-
v_str = _truncate(str(v), 30)
127+
v_str = _truncate(str(v), 80)
126128
parts.append(f"{k}={v_str}")
127129

128-
return _truncate(", ".join(parts), 80)
130+
return _truncate(", ".join(parts), 200)
129131

130132
def agent_start(self, agent_name: str) -> None:
131133
"""Record agent start."""
@@ -185,8 +187,8 @@ def llm_end(
185187
"""Record LLM call end with optional metrics for debug mode."""
186188
ts = time.time()
187189

188-
# Use latency_ms if provided, otherwise calculate from start time
189-
if latency_ms is not None:
190+
# Use latency_ms if provided and positive, otherwise calculate from start time
191+
if latency_ms is not None and latency_ms > 0:
190192
duration_ms = latency_ms
191193
elif duration_ms is None and hasattr(self, '_llm_start_time'):
192194
start_ts = self._llm_start_time
@@ -267,9 +269,13 @@ def tool_end(
267269
args_str = self._format_args(self._pending_tool_args)
268270

269271
result_str = ""
270-
if result_summary:
271-
result_str = f" → {_truncate(result_summary, 50)}"
272+
if self._show_tool_output:
273+
if result_summary:
274+
result_str = f" → {_truncate(result_summary, 50)}"
275+
elif error_message:
276+
result_str = f" → ✗ {_truncate(error_message, 50)}"
272277
elif error_message:
278+
# Always show errors even when output is hidden
273279
result_str = f" → ✗ {_truncate(error_message, 50)}"
274280

275281
color = "cyan" if status == "ok" else "red"
@@ -285,9 +291,9 @@ def output(self, content: str, agent_name: Optional[str] = None) -> None:
285291
separator = "─" * 50
286292
self._emit_text(separator, ts, "dim", show_timestamp=False)
287293
self._emit_text("Final Output:", ts, "bold", show_timestamp=False)
288-
# Print actual content without truncation for final output
294+
# Print actual content to stdout (not stderr) so users see it
289295
with self._lock: # Thread-safe output
290-
print(content, file=self._file)
296+
print(content)
291297

292298
def _emit_text(self, message: str, ts: float, style: str = None, show_timestamp: bool = True) -> None:
293299
"""Emit a text line. Thread-safe for multi-agent execution."""
@@ -320,6 +326,7 @@ def enable_status_output(
320326
use_color: bool = True,
321327
show_timestamps: bool = True,
322328
show_metrics: bool = False, # Enable metrics for debug mode
329+
show_tool_output: bool = True, # Show tool result output inline
323330
) -> StatusOutput:
324331
"""
325332
Enable actions output mode globally.
@@ -334,6 +341,7 @@ def enable_status_output(
334341
use_color: Whether to use colored output (default: True)
335342
show_timestamps: Whether to show timestamps (default: True)
336343
show_metrics: Whether to show token/cost metrics (default: False)
344+
show_tool_output: Whether to show tool result output inline (default: True)
337345
338346
Returns:
339347
StatusOutput instance for programmatic access
@@ -350,6 +358,7 @@ def enable_status_output(
350358
use_color=use_color,
351359
show_timestamps=show_timestamps,
352360
show_metrics=show_metrics,
361+
show_tool_output=show_tool_output,
353362
)
354363
_status_output_enabled = True
355364

src/praisonai-agents/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "praisonaiagents"
7-
version = "1.5.17"
7+
version = "1.5.18"
88
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
99
readme = "README.md"
1010
requires-python = ">=3.10"

src/praisonai-agents/uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/praisonai/praisonai.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@ class Praisonai < Formula
33

44
desc "AI tools for various AI applications"
55
homepage "https://github.com/MervinPraison/PraisonAI"
6-
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v4.5.17.tar.gz"
7-
sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v4.5.17.tar.gz | shasum -a 256`.split.first
6+
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v4.5.18.tar.gz"
7+
sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v4.5.18.tar.gz | shasum -a 256`.split.first
88
license "MIT"
99

1010
depends_on "python@3.11"

src/praisonai/praisonai/cli/legacy.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,11 +99,16 @@ def is_legacy_invocation(argv: List[str]) -> bool:
9999
'-p', '--autonomy', '--trust',
100100
'--sandbox', '--external-agent', '--compare', '--interval', '--timeout',
101101
'--max-cost', '--rpm', '--tpm', '--temperature',
102+
# Display flags (verbosity ladder + machine output)
103+
'--output', '--flow',
102104
]
103105

104106
for arg in argv:
105107
if arg in legacy_flags:
106108
return True
109+
# Handle -vv, -qq, -vvv etc. (count flags not in Typer)
110+
if len(arg) > 2 and arg.startswith('-') and not arg.startswith('--') and arg[1:] in ('vv', 'qq', 'vvv', 'qqq'):
111+
return True
107112

108113
# Check if first arg is a legacy command not in Typer
109114
if first_arg in LEGACY_COMMANDS and first_arg not in TYPER_COMMANDS:

src/praisonai/praisonai/cli/main.py

Lines changed: 93 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -838,7 +838,14 @@ def parse_args(self):
838838
parser.add_argument("--no-acp", action="store_true", help="Disable ACP tools (agentic file operations with plan/approve/apply)")
839839
parser.add_argument("--no-lsp", action="store_true", help="Disable LSP tools (code intelligence: symbols, definitions, references)")
840840
parser.add_argument("--save", "-s", action="store_true", help="Save research output to file (output/research/)")
841-
parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output for research")
841+
parser.add_argument("-v", "--verbose", action="count", default=0,
842+
help="Increase verbosity (-v=verbose, -vv=debug)")
843+
parser.add_argument("-q", "--quiet", action="count", default=0,
844+
help="Decrease verbosity (-q=quiet, -qq=silent)")
845+
parser.add_argument("--output", type=str, choices=["json", "jsonl"], dest="output_format",
846+
help="Machine-readable output format (json or jsonl)")
847+
parser.add_argument("--flow", action="store_true",
848+
help="Show visual agent→tool flow chart")
842849
parser.add_argument("--web", "--web-search", action="store_true", help="Enable native web search (OpenAI, Gemini, Anthropic, xAI, Perplexity)")
843850
parser.add_argument("--web-fetch", action="store_true", help="Enable web fetch to retrieve URL content (Anthropic only)")
844851
parser.add_argument("--prompt-caching", action="store_true", help="Enable prompt caching to reduce costs (OpenAI, Anthropic, Bedrock, Deepseek)")
@@ -954,9 +961,6 @@ def parse_args(self):
954961
parser.add_argument("--lsp", action="store_true",
955962
help="Enable LSP tools in autonomy mode (slower but provides code intelligence)")
956963

957-
# P3/G5: Display mode - control output verbosity
958-
parser.add_argument("--display", type=str, choices=["minimal", "status", "verbose", "debug", "jsonl", "json", "flow"],
959-
default="status", help="Display mode: minimal|status|verbose|debug|jsonl|json|flow")
960964

961965
# P8/G11: Tool timeout - prevent slow tools from blocking
962966
parser.add_argument("--tool-timeout", type=int, default=60,
@@ -3888,9 +3892,12 @@ def handle_direct_prompt(self, prompt):
38883892
if autonomy_mode and autonomy_mode not in ('disable', None):
38893893
agent_config["autonomy"] = {"level": autonomy_mode, "enabled": True}
38903894

3891-
# Set output mode based on --verbose flag
3892-
# Uses consolidated 'output' param instead of deprecated 'verbose'
3893-
if hasattr(self, 'args') and getattr(self.args, 'verbose', False):
3895+
# Set SDK output preset based on verbosity flags
3896+
# The display dispatcher handles CLI rendering; this controls SDK-level behavior
3897+
v = getattr(self.args, 'verbose', 0) if hasattr(self, 'args') else 0
3898+
if v >= 2:
3899+
agent_config["output"] = "verbose" # SDK debug-level detail
3900+
elif v >= 1:
38943901
agent_config["output"] = "verbose"
38953902
else:
38963903
agent_config["output"] = "minimal"
@@ -4264,42 +4271,29 @@ def level_based_approve(function_name, arguments, risk_level):
42644271
else:
42654272
result = auto_rag.chat(prompt)
42664273
else:
4267-
# Unified display mode dispatcher
4268-
display_mode = getattr(self.args, 'display', 'status')
4274+
# Resolve display mode from CLI flags
4275+
display_mode = self._resolve_display_mode()
42694276

4270-
# Also check Typer global state (for -o json, --quiet, etc.)
4271-
try:
4272-
from .app import state as typer_state
4273-
if typer_state.quiet:
4274-
display_mode = 'minimal'
4275-
elif typer_state.output_format.value == 'json':
4276-
display_mode = 'json'
4277-
elif typer_state.output_format.value == 'stream-json':
4278-
display_mode = 'jsonl'
4279-
elif typer_state.screen_reader:
4280-
display_mode = 'debug' # trace-like, no spinners
4281-
except (ImportError, AttributeError):
4282-
pass
4283-
4284-
# Check legacy verbose flag
4285-
is_verbose = agent_config.get("verbose", False)
4286-
if is_verbose and display_mode == 'status':
4287-
display_mode = 'verbose'
4277+
if display_mode == 'silent':
4278+
# -qq: No output at all, exit code only
4279+
if hasattr(agent, 'start'):
4280+
result = agent.start(prompt)
4281+
else:
4282+
result = agent.chat(prompt)
42884283

4289-
if display_mode == 'minimal':
4290-
# Quiet: result only, no spinners
4284+
elif display_mode == 'quiet':
4285+
# -q: Result only, no spinners or status
42914286
if hasattr(agent, 'start'):
42924287
result = agent.start(prompt)
42934288
else:
42944289
result = agent.chat(prompt)
4295-
# Still print the result
42964290
if result is not None:
42974291
output = getattr(result, 'output', None) or (str(result) if result else None)
42984292
if output:
42994293
print(output)
43004294

43014295
elif display_mode == 'verbose':
4302-
# SDK StatusOutput with timestamps and metrics
4296+
# -v: SDK StatusOutput with timestamps and metrics
43034297
try:
43044298
from praisonaiagents.output.status import enable_status_output, disable_status_output
43054299
enable_status_output(show_timestamps=True, show_metrics=True)
@@ -4315,7 +4309,7 @@ def level_based_approve(function_name, arguments, risk_level):
43154309
result = agent.chat(prompt)
43164310

43174311
elif display_mode == 'debug':
4318-
# SDK TraceOutput with markdown
4312+
# -vv: SDK TraceOutput with markdown rendering
43194313
try:
43204314
from praisonaiagents.output.trace import enable_trace_output, disable_trace_output
43214315
enable_trace_output(use_markdown=True)
@@ -4331,7 +4325,7 @@ def level_based_approve(function_name, arguments, risk_level):
43314325
result = agent.chat(prompt)
43324326

43334327
elif display_mode == 'jsonl':
4334-
# JSONL structured output for CI/CD
4328+
# --output jsonl: JSONL structured output for CI/CD
43354329
from .features.display_jsonl import JsonlDisplay
43364330
from praisonaiagents.main import register_display_callback as _reg_cb
43374331

@@ -4367,7 +4361,7 @@ def level_based_approve(function_name, arguments, risk_level):
43674361
print(result)
43684362

43694363
elif display_mode == 'json':
4370-
# JSON envelope output
4364+
# --output json: JSON envelope output
43714365
import json as json_mod
43724366
start_time = time.time()
43734367
if hasattr(agent, 'start'):
@@ -4388,7 +4382,7 @@ def level_based_approve(function_name, arguments, risk_level):
43884382
print(json_mod.dumps(envelope, indent=2))
43894383

43904384
elif display_mode == 'flow':
4391-
# SDK FlowDisplay - visual agent→tool chart
4385+
# --flow: SDK FlowDisplay - visual agent→tool chart
43924386
try:
43934387
from praisonaiagents.flow_display import track_workflow
43944388
flow = track_workflow()
@@ -4405,8 +4399,21 @@ def level_based_approve(function_name, arguments, risk_level):
44054399
result = agent.chat(prompt)
44064400

44074401
else:
4408-
# Default "status" mode - enhanced interactive display
4409-
result = self._run_with_status_display(agent, prompt)
4402+
# Default: SDK status output — clean inline progress
4403+
# Shows: spinner + tool calls, no panels, no timestamps
4404+
try:
4405+
from praisonaiagents.output.status import enable_status_output, disable_status_output
4406+
enable_status_output(show_timestamps=False, show_metrics=False)
4407+
if hasattr(agent, 'start'):
4408+
result = agent.start(prompt)
4409+
else:
4410+
result = agent.chat(prompt)
4411+
disable_status_output()
4412+
except ImportError:
4413+
if hasattr(agent, 'start'):
4414+
result = agent.start(prompt)
4415+
else:
4416+
result = agent.chat(prompt)
44104417

44114418
# ===== POST-PROCESSING WITH NEW FEATURES =====
44124419

@@ -4590,14 +4597,60 @@ def _handle_profiled_prompt(self, prompt):
45904597
# Return the actual result for any downstream processing
45914598
return result.output
45924599

4600+
def _resolve_display_mode(self):
4601+
"""Map CLI flags to a display mode string.
4602+
4603+
Priority: --output > --flow > --display (deprecated) > -v/-q > default.
4604+
Returns one of: 'silent', 'quiet', 'verbose', 'debug', 'json', 'jsonl', 'flow', 'status'.
4605+
"""
4606+
# Machine formats take highest priority
4607+
output_fmt = getattr(self.args, 'output_format', None)
4608+
if output_fmt:
4609+
return output_fmt # "json" or "jsonl"
4610+
4611+
# --flow is an independent feature flag
4612+
if getattr(self.args, 'flow', False) or getattr(self.args, 'flow_display', False):
4613+
return 'flow'
4614+
4615+
# Check Typer global state (for Typer subcommands)
4616+
try:
4617+
from .app import state as typer_state
4618+
if typer_state.quiet:
4619+
return 'quiet'
4620+
if hasattr(typer_state, 'output_format'):
4621+
if typer_state.output_format.value == 'json':
4622+
return 'json'
4623+
elif typer_state.output_format.value == 'stream-json':
4624+
return 'jsonl'
4625+
if typer_state.screen_reader:
4626+
return 'verbose' # Accessible: timestamps but no spinners
4627+
except (ImportError, AttributeError):
4628+
pass
4629+
4630+
# Verbosity ladder: -v/-vv/-q/-qq
4631+
v = getattr(self.args, 'verbose', 0)
4632+
q = getattr(self.args, 'quiet', 0)
4633+
if q >= 2:
4634+
return 'silent'
4635+
if q >= 1:
4636+
return 'quiet'
4637+
if v >= 2:
4638+
return 'debug'
4639+
if v >= 1:
4640+
return 'verbose'
4641+
4642+
return 'status' # Default: Rich Live TUI
4643+
45934644
def _run_with_status_display(self, agent, prompt):
45944645
"""
4595-
Run agent with minimal status display (spinner + tool/handoff updates).
4646+
Run agent with Rich Live TUI display (default CLI experience).
45964647
45974648
Shows:
4598-
- "Generating..." with spinner while processing
4649+
- Spinner with emoji (🤖 thinking, ⏳ tools)
45994650
- Real-time tool call notifications via registered callback
4651+
- Autonomy iteration badges
46004652
- Agent handoff notifications
4653+
- Elapsed time with pause tracking during approval
46014654
"""
46024655
import threading
46034656
import time
@@ -4660,7 +4713,7 @@ def _run_with_status_display(self, agent, prompt):
46604713
}
46614714

46624715
# P7/G3: Get display mode for tool args/results visibility
4663-
display_mode = getattr(self.args, 'display', 'status')
4716+
display_mode = self._resolve_display_mode()
46644717

46654718
# Phase 0: Smart action verb mapping for tool names
46664719
_TOOL_VERBS = {

Comments (0)