# env.example — sample environment configuration (93 lines, 3.84 KB).
# Copy to .env: `cp env.example .env`
# Required - the model the executor runs.
AGENT_MODEL=gemini-3.1-flash-lite-preview
# Optional separate model for the planner and replanner.
# Falls back to AGENT_MODEL when unset.
AGENT_PLANNER_MODEL=gemini-3.1-flash-lite-preview
# Optional separate model for the final answer synthesizer.
# Falls back to AGENT_MODEL when unset. It can make sense to put a larger
# model here when the executor is a small tool-calling model.
# AGENT_SYNTHESIZER_MODEL=gemini-3.1-flash-lite-preview
# Per-stage provider override (advanced). Use these when the planner or
# synthesizer should run on a DIFFERENT provider / endpoint / API key than
# the executor. Each block is fully optional - omit a variable to inherit
# the top-level AGENT_* default.
#
# Cross-provider override caveat: setting AGENT_PLANNER_PROVIDER_TYPE to a
# different vendor than AGENT_PROVIDER_TYPE requires AGENT_PLANNER_API_KEY
# (the agent refuses to inherit a key across vendors).
#
# AGENT_PLANNER_PROVIDER_TYPE=anthropic
# AGENT_PLANNER_BASE_URL=
# AGENT_PLANNER_API_KEY=sk-ant-...
#
# AGENT_SYNTHESIZER_PROVIDER_TYPE=openai
# AGENT_SYNTHESIZER_BASE_URL=
# AGENT_SYNTHESIZER_API_KEY=sk-...
# Provider type: openai | anthropic | google | openai-compatible
#
# For Gemini USE google (the native API): the OpenAI-compat endpoint is broken
# for structured outputs (responseFormat is ignored) and for streamed
# tool_calls (the `index` field is missing). The google provider hits the
# native API directly and behaves correctly.
#
# openai-compatible is meant for self-hosted OpenAI-spec servers
# (vLLM, ollama, etc.) that follow the spec strictly.
AGENT_PROVIDER_TYPE=google
# Optional for openai/anthropic/google (the SDK default endpoint is used);
# required for openai-compatible.
# AGENT_BASE_URL=
AGENT_API_KEY=replace-me
# Client name (forwarded as User-Agent on MCP/provider requests).
AGENT_CLIENT_NAME=vercel-mcp-test
# none | error | warn | info | debug
AGENT_LOG_LEVEL=info
# Cap on plan/execute/replan iterations. Guards against runaway loops.
AGENT_MAX_ITERATIONS=10
# Cap on LLM steps inside a single executor call (multi-step tool calling).
# A `tool-call -> tool-result -> assistant-message` cycle counts as 2 steps.
# Sensible minimum is 2 (one tool + final answer); recommended 6-10.
AGENT_MAX_STEPS_PER_TASK=8
# Per-LLM-call timeout (planner / executor / replanner / synthesizer), in ms.
# 0 or empty disables the timeout. 60000-120000 is recommended in production.
# AGENT_LLM_TIMEOUT_MS=90000
# How many times to retry an LLM call on 5xx / 429 / network errors. Default 2.
# AGENT_LLM_MAX_RETRIES=2
# Cap on the number of "revise" decisions the replanner can make per run.
# Prevents the LLM from looping in "revise -> execute -> revise". Default 2.
# AGENT_MAX_REVISIONS=2
# Hard cap on total tokens per run (input + output). When crossed the agent
# exits the execution loop and proceeds to synthesize the final answer.
# AGENT_MAX_TOTAL_TOKENS=200000
# Tool selection strategy for the executor:
# - all (default) every step sees the full filtered ToolSet.
# Works well with <=50 tools.
# - plan-narrowed executor gets only the tools listed in
# step.suggestedTools. The planner MUST populate
# suggestedTools for every step that needs tools.
# Cuts context tokens significantly when the catalog
# has 100+ tools.
# AGENT_TOOL_SELECTION_STRATEGY=all
# Fine-grained MCP tool control. Names use the `serverName__toolName` format.
# availableTools wins over excludedTools.
# AGENT_AVAILABLE_TOOLS=demo__list_companies,demo__get_company
# AGENT_EXCLUDED_TOOLS=
# Remote MCP servers. JSON: Record<name, { url, headers? }>.
# Set headers.Authorization to 'Bearer <token>' to pre-authorize requests.
MCP_SERVERS={"demo":{"url":"https://mcp.miniaccountant.app/v1beta"}}