Stream Chat
Examples for streaming chat completions using async generators.
Basic Streaming
Copy
Ask AI
import { Gateway } from "@adaline/gateway";
import { OpenAI } from "@adaline/openai";
import { Config } from "@adaline/types";

// Set up the gateway and an OpenAI GPT-4o chat model.
// The API key is read from the environment, never hard-coded.
const gateway = new Gateway();
const provider = new OpenAI();
const gpt4o = provider.chatModel({
  modelName: "gpt-4o",
  apiKey: process.env.OPENAI_API_KEY,
});

// streamChat returns an async iterable; consume it with top-level
// for await (valid in ES modules) and write each chunk as it arrives.
for await (const part of gateway.streamChat({
  model: gpt4o,
  config: Config().parse({ temperature: 0.7, maxTokens: 1000 }),
  messages: [
    { role: "user", content: [{ modality: "text", value: "Write a short story about a robot." }] },
  ],
  tools: [],
})) {
  process.stdout.write(part.response);
}
Collecting the Full Response
Copy
Ask AI
// Accumulate the streamed text into a single string and keep the
// last usage report seen (usage arrives on the final chunk(s) only,
// judging by the conditional below — confirm against gateway docs).
let fullResponse = "";
let finalUsage = null;

for await (const part of gateway.streamChat({
  model: gpt4o,
  config: Config().parse({ temperature: 0.7 }),
  messages: [
    { role: "user", content: [{ modality: "text", value: "Explain machine learning." }] },
  ],
  tools: [],
})) {
  fullResponse += part.response;
  if (part.usage) {
    finalUsage = part.usage;
  }
}

console.log("Full response:", fullResponse);
console.log("Token usage:", finalUsage);
Streaming with Callbacks
Copy
Ask AI
const gateway = new Gateway();

// Callbacks fire alongside the async generator: onChunk for every
// chunk, onComplete once streaming finishes, onError on failure.
const callbacks = {
  onChunk: (chunk) => {
    console.log("Chunk:", chunk.response);
  },
  onComplete: (response) => {
    console.log("Done:", response.usage);
  },
  onError: (error) => {
    console.error("Stream error:", error);
  },
};

for await (const chunk of gateway.streamChat({
  model: gpt4o,
  config: Config().parse({ temperature: 0.7 }),
  messages: [
    { role: "user", content: [{ modality: "text", value: "Hello!" }] },
  ],
  tools: [],
  callbacks,
})) {
  // Chunks are also yielded by the generator, even when callbacks
  // are supplied — consume or ignore them here as needed.
}