
Commit 3a9d50c

Merge pull request #85 from langtail/ai-v5
Add support for AI SDK v5

2 parents 8980aba + e1ff550

16 files changed: +1849 −719 lines

CHANGELOG.md
Lines changed: 17 additions & 0 deletions

```diff
@@ -1,5 +1,22 @@
 # Changelog
 
+## 1.0.0
+
+### Breaking Changes
+
+- **Vercel AI SDK v5 support** - This version requires `@ai-sdk/provider` v2.0.0 and `@ai-sdk/provider-utils` v3.0.1
+- Previous versions of Vercel AI SDK (v4) are no longer supported
+- Updated `ai` dev dependency to v5.0.0
+
+### Changes
+
+- Refactored `langtail-language-model.ts` for AI SDK v5 compatibility
+- Updated `convert-to-openai-chat-messages.ts` for new message format
+- Updated `map-langtail-finish-reason.ts` for v5 finish reasons
+- Updated `openai-prepare-tools.ts` for v5 tool format
+- Removed deprecated `map-openai-chat-logprobs.ts`
+- Added comprehensive test coverage for language model and message conversion
+
 ## 0.16.8
 
 - Add GoogleGeminiV1 to ReasoningFormat and update related tests
```
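For users, the most visible part of the breaking change is the tool definition format. As a compact before/after sketch, reconstructed from the old and new state of `playground/vercel.ts` in this commit (the weather tool and its return shape come from that file; only the standalone framing is added here):

```ts
import { tool } from "ai"
import { z } from "zod/v4"

// AI SDK v4 (removed in this commit): the input schema was called `parameters`.
// const weather = tool({
//   description: "Get the weather in a location",
//   parameters: z.object({
//     location: z.string().describe("The location to get the weather for"),
//   }),
//   execute: async ({ location }) => ({ location, temperature: 12 }),
// })

// AI SDK v5 (this commit): `parameters` is renamed to `inputSchema`.
const weather = tool({
  description: "Get the weather in a location",
  inputSchema: z.object({
    location: z.string().describe("The location to get the weather for"),
  }),
  execute: async ({ location }) => ({ location, temperature: 12 }),
})
```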

package.json
Lines changed: 6 additions & 5 deletions

```diff
@@ -1,6 +1,6 @@
 {
   "name": "langtail",
-  "version": "0.16.8",
+  "version": "1.0.0",
   "description": "",
   "main": "./Langtail.js",
   "packageManager": "pnpm@8.15.6",
@@ -35,8 +35,9 @@
   ],
   "license": "MIT",
   "devDependencies": {
+    "@openrouter/ai-sdk-provider": "^1.2.7",
     "@types/node": "^20.12.11",
-    "ai": "^4.3.3",
+    "ai": "^5.0.0",
     "copyfiles": "^2.4.1",
     "fs-extra": "^11.2.0",
     "jsdom": "^24.1.0",
@@ -91,16 +92,16 @@
     }
   },
   "dependencies": {
-    "@ai-sdk/provider": "^1.1.1",
-    "@ai-sdk/provider-utils": "^2.2.5",
+    "@ai-sdk/provider": "2.0.0",
+    "@ai-sdk/provider-utils": "3.0.1",
     "@langtail/handlebars-evalless": "^0.1.2",
     "commander": "^12.1.0",
     "date-fns": "^3.6.0",
     "dotenv-flow": "^4.1.0",
     "json-schema-to-zod": "^2.1.0",
     "openai": "4.82.0",
     "query-string": "^7.1.3",
-    "zod": "^3.23.8"
+    "zod": "^3.25.76"
   },
   "tsup": {
     "dts": true,
```

playground/openrouter.ts
Lines changed: 41 additions & 0 deletions (new file)

```ts
import "dotenv-flow/config"
import { stepCountIs, streamText, tool } from "ai"
import { z } from "zod/v4"

import { openrouter } from "@openrouter/ai-sdk-provider"

async function main() {
  const result = streamText({
    model: openrouter("google/gemini-3-pro-preview"),
    messages: [
      {
        role: "user",
        content: "What is the weather in Tokyo?",
      },
    ],
    stopWhen: stepCountIs(5),
    onStepFinish: (step) => {
      console.log(step.content)
    },
    tools: {
      weather: tool({
        description: "Get the weather in a location",
        inputSchema: z.object({
          location: z.string().describe("The location to get the weather for"),
        }),
        execute: async ({ location }) => {
          return {
            location,
            temperature: 12,
          }
        },
      }),
    },
  })

  for await (const chunk of result.fullStream) {
    console.log("chunk", chunk)
  }
}

main()
```
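This file serves as a v5 smoke test against a second provider alongside the Langtail one in `playground/vercel.ts`. Presumably it runs like the other playground scripts — e.g. via `tsx playground/openrouter.ts` with an `OPENROUTER_API_KEY` in the environment that `dotenv-flow` loads — though the runner and env setup are not part of this diff.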

playground/vercel.ts
Lines changed: 35 additions & 64 deletions

```diff
@@ -1,86 +1,57 @@
-import 'dotenv-flow/config'
-import { generateText, tool, streamText } from 'ai';
-import { langtail } from '../src/vercel-ai';
-import fs from 'fs/promises';
-import { z } from 'zod';
-
+import "dotenv-flow/config"
+import { stepCountIs, streamText, tool } from "ai"
+import { langtail } from "../src/vercel-ai"
+import { z } from "zod/v4"
 
 async function main() {
-  const content = await fs.readFile('./playground/text.txt', 'utf-8');
-
-  const { text, reasoning } = await generateText({
-    model: langtail('vtip'),
+  const result = streamText({
+    model: langtail("vtip"),
     messages: [
       {
-        role: 'user',
+        role: "user",
         content: "What is the weather in Tokyo?",
       },
     ],
+    stopWhen: stepCountIs(5),
+    onStepFinish: (step) => {
+      console.log(step.content)
+    },
     tools: {
       weather: tool({
-        description: 'Get the weather in a location',
-        parameters: z.object({
-          location: z.string().describe('The location to get the weather for'),
+        description: "Get the weather in a location",
+        inputSchema: z.object({
+          location: z.string().describe("The location to get the weather for"),
         }),
-        execute: async ({ location }) => {
+        async execute({ location }) {
           return {
-            location,
-            temperature: 12,
+            type: "image",
+            data: "https://stickerapp.co.uk/cdn-assets/images/stickers/608t.png",
           }
         },
-      }),
-    },
-  });
 
-  const { text: text2, reasoning: reasoning2 } = await generateText({
-    model: langtail('vtip'),
-    providerOptions: {
-      anthropic: {
-        thinking: {
-          budgetTokens: 1025,
-          type: "enabled",
-        }
-      }
-    },
-    messages: [
-      {
-        role: 'user',
-        content: "What is the weather in Tokyo?",
-      },
-      {
-        role: 'assistant',
-        content: text,
-      },
-      {
-        role: 'user',
-        content: "What is the weather in Prague?",
-      },
-    ],
-    tools: {
-      weather: tool({
-        description: 'Get the weather in a location',
-        parameters: z.object({
-          location: z.string().describe('The location to get the weather for'),
-        }),
-        execute: async ({ location }) => {
+        // map to tool result content for LLM consumption:
+        toModelOutput(result) {
           return {
-            location,
-            temperature: 12,
+            type: "content",
+            value:
+              typeof result === "string"
+                ? [{ type: "text", text: result }]
+                : [
+                    {
+                      type: "media",
+                      data: result.data,
+                      mediaType: "image/png",
+                    },
+                  ],
           }
         },
       }),
     },
-  });
-
-  console.log(text);
-  console.log(reasoning);
-
-  console.log('--------------------------------');
-
-  console.log(text2);
-  console.log(reasoning2);
+  })
 
+  for await (const chunk of result.fullStream) {
+    console.log("chunk", chunk)
+  }
 }
 
-main();
-
+main()
```
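This rewrite exercises both v5 changes the changelog calls out for tools: `parameters` becomes `inputSchema`, and the tool gains a `toModelOutput` hook. The tool's `execute` now returns an image URL rather than plain JSON, and `toModelOutput` maps that result into tool-result content parts — a `text` part for string results, or a `media` part with a `mediaType` for the image — so the model can consume the tool output on the next step. In the deleted v4 version the raw JSON return value was sent back to the model as-is, with no per-tool mapping step.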
