-
Notifications
You must be signed in to change notification settings - Fork 56
Expand file tree
/
Copy pathtest_weather_agent.py
More file actions
137 lines (116 loc) · 3.9 KB
/
test_weather_agent.py
File metadata and controls
137 lines (116 loc) · 3.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
"""
Example test for a weather agent.
This example demonstrates testing an AI agent that provides weather information.
"""
import pytest
import scenario
import litellm
from function_schema import get_function_schema
@pytest.mark.agent_test
@pytest.mark.asyncio
@pytest.mark.flaky(reruns=2)
async def test_weather_agent():
    """Simulate a user asking about the weather and verify the agent calls the weather tool."""

    # Adapter wiring the local weather_agent function into the scenario framework.
    class WeatherAgent(scenario.AgentAdapter):
        async def call(self, input: scenario.AgentInput) -> scenario.AgentReturnTypes:
            return weather_agent(input.messages)

    # Custom assertion: the agent must have invoked the weather tool at some point.
    def check_for_weather_tool_call(state: scenario.ScenarioState):
        assert state.has_tool_call("get_current_weather")

    # Drive the scripted simulation.
    simulation = await scenario.run(
        name="checking the weather",
        description="""
            The user is planning a boat trip from Barcelona to Rome,
            and is wondering what the weather will be like.
        """,
        agents=[
            WeatherAgent(),
            scenario.UserSimulatorAgent(model="openai/gpt-4.1-mini"),
        ],
        script=[
            scenario.user(),
            scenario.agent(),
            # Agent sometimes needs to ask for clarification, so allow one
            # extra user/agent exchange before checking the tool call.
            scenario.user(),
            scenario.agent(),
            check_for_weather_tool_call,
            scenario.succeed(),
        ],
        set_id="python-examples",
    )

    # The scripted run must have reached scenario.succeed().
    assert simulation.success
# Example agent implementation, without any frameworks
import litellm
import random
import json
def get_current_weather(city: str) -> str:
    """
    Get the current weather in a given city.

    Args:
        city: The city to get the weather for.

    Returns:
        The current weather in the given city.
    """
    # Fake weather lookup for demo purposes: draw a random temperature first,
    # then a random condition (order matters for reproducible seeded output).
    temperature_c = random.randint(0, 30)
    condition = random.choice(["sunny", "cloudy", "rainy", "snowy"])
    return (
        f"The weather in {city} is {condition} "
        f"with a temperature of {temperature_c}°C."
    )
@scenario.cache()
def weather_agent(messages, response_messages=None) -> scenario.AgentReturnTypes:
    """
    Minimal tool-calling weather agent built directly on litellm (no framework).

    Args:
        messages: Conversation history from the scenario runner.
        response_messages: Messages accumulated by this agent across recursive
            tool-execution rounds. Defaults to an empty list; ``None`` is used
            as the default to avoid the shared-mutable-default pitfall.

    Returns:
        The list of new messages produced by the agent (assistant message plus
        any tool messages), as a scenario.AgentReturnTypes value.

    Raises:
        ValueError: If the model requests a tool that is not registered.
    """
    if response_messages is None:
        response_messages = []

    tools = [
        get_current_weather,
    ]
    response = litellm.completion(
        model="openai/gpt-4.1-mini",
        messages=[
            {
                "role": "system",
                # NOTE: fixed grammar ("You are a helpful assistant").
                "content": """
                    You are a helpful assistant that may help the user with weather information.
                    Do not guess the city if they don't provide it.
                    Do not ask for clarification
                """,
            },
            *messages,
            *response_messages,
        ],
        # Expose each Python tool to the model in OpenAI function-call format.
        tools=[
            {"type": "function", "function": get_function_schema(tool)}
            for tool in tools
        ],
        tool_choice="auto",
    )

    message = response.choices[0].message  # type: ignore
    if message.tool_calls:
        # Execute each requested tool locally, then recurse so the model can
        # incorporate the tool results into its next (possibly final) answer.
        tools_by_name = {tool.__name__: tool for tool in tools}
        tool_responses = []
        for tool_call in message.tool_calls:
            tool_call_name = tool_call.function.name
            tool_call_args = json.loads(tool_call.function.arguments)
            # Guard clause: fail loudly on an unknown tool name.
            if tool_call_name not in tools_by_name:
                raise ValueError(f"Tool {tool_call_name} not found")
            tool_call_function = tools_by_name[tool_call_name]  # type: ignore
            tool_call_function_response = tool_call_function(**tool_call_args)
            tool_responses.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(tool_call_function_response),
                }
            )
        return weather_agent(
            messages,
            [
                *response_messages,
                message,
                *tool_responses,
            ],
        )
    return [*response_messages, message]  # type: ignore