Overview
Logging types enable comprehensive tracking of AI applications, from high-level traces to granular span content with various operation types.

Span Content Types
LogSpanContent (Union)
Polymorphic content type for different span categories.
Ask AI
/**
 * Polymorphic span content: a discriminated union over the `type`
 * field, with one member per span category (LLM call, streaming LLM
 * call, embeddings, function, tool, guardrail, retrieval, other).
 */
type LogSpanContent =
  | LogSpanModelContent
  | LogSpanModelStreamContent
  | LogSpanEmbeddingsContent
  | LogSpanFunctionContent
  | LogSpanToolContent
  | LogSpanGuardrailContent
  | LogSpanRetrievalContent
  | LogSpanOtherContent;
Copy
Ask AI
function handleSpanContent(content: LogSpanContent) {
switch (content.type) {
case 'Model':
console.log(`LLM: ${content.provider}/${content.model}, Cost: $${content.cost}`);
break;
case 'ModelStream':
console.log(`Streaming: ${content.provider}`);
break;
case 'Embeddings':
console.log('Embedding generation');
break;
case 'Tool':
console.log('Tool execution');
break;
case 'Retrieval':
console.log('Vector search');
break;
case 'Function':
console.log('Custom function');
break;
case 'Guardrail':
console.log('Safety check');
break;
case 'Other':
console.log('Other operation');
break;
}
}
LogSpanModelContent
LLM inference span content.
Ask AI
/**
 * Non-streaming LLM inference span content.
 *
 * `input`, `output`, and `expected` are JSON-encoded strings
 * (LogSpanContentJson), not objects.
 */
interface LogSpanModelContent {
  type: 'Model';                               // discriminant for the LogSpanContent union
  provider: string;                            // 1-512 chars
  model: string;                               // 1-512 chars
  variables?: Record<string, Variable> | null; // prompt variables, if any
  cost?: number | null;                        // minimum: 0 — cost of the call
  input: LogSpanContentJson;                   // request payload, JSON string
  output: LogSpanContentJson;                  // response payload, JSON string
  expected?: LogSpanContentJson | null;        // reference output for evaluation
}
Copy
Ask AI
// Example: record a completed (non-streaming) LLM call on a span.
// Payload fields must be JSON strings, hence JSON.stringify.
span.update({
  content: {
    type: 'Model',
    provider: 'openai',
    model: 'gpt-4o',
    variables: { userName: 'John' },  // prompt variables used for this call
    cost: 0.002,
    input: JSON.stringify([{ role: 'user', content: 'Hello' }]),
    output: JSON.stringify({ role: 'assistant', content: 'Hi!' }),
    expected: JSON.stringify({ role: 'assistant', content: 'Hello!' })  // reference answer
  }
});
LogSpanModelStreamContent
Streaming LLM inference content.
Ask AI
/**
 * Streaming LLM inference span content.
 *
 * Captures both the raw stream (`output`) and the re-assembled final
 * message (`aggregateOutput`).
 */
interface LogSpanModelStreamContent {
  type: 'ModelStream';                          // discriminant for the LogSpanContent union
  provider: string;
  model: string;
  variables?: Record<string, Variable> | null;
  cost?: number | null;
  input: LogSpanContentJson;
  output: string;                               // Raw stream chunks (not required to be JSON)
  aggregateOutput: LogSpanContentJson;          // final message rebuilt from the chunks
  expected?: LogSpanContentJson | null;
}
Copy
Ask AI
// Example: accumulate the raw chunks while streaming, then log both
// the raw stream and the aggregated final message.
let chunks = '';
for await (const chunk of stream) {
  chunks += chunk;
}
span.update({
  content: {
    type: 'ModelStream',
    provider: 'anthropic',
    model: 'claude-3-opus',
    input: JSON.stringify(messages),
    output: chunks,  // raw, unparsed stream output
    aggregateOutput: JSON.stringify({ role: 'assistant', content: fullResponse }),
    cost: 0.005
  }
});
LogSpanEmbeddingsContent
Embedding generation span content.
Ask AI
/** Embedding generation span content; payloads are JSON strings. */
interface LogSpanEmbeddingsContent {
  type: 'Embeddings';                    // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
span.update({
content: {
type: 'Embeddings',
input: JSON.stringify({ texts: ['query'], model: 'text-embedding-3-large' }),
output: JSON.stringify({ embeddings: [[0.1, 0.2, ...]], dimensions: 3072 })
}
});
LogSpanFunctionContent
Custom function execution span content.
Ask AI
/** Custom function execution span content; payloads are JSON strings. */
interface LogSpanFunctionContent {
  type: 'Function';                      // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
// Example: record a custom function execution on a span.
span.update({
  content: {
    type: 'Function',
    input: JSON.stringify({ operation: 'process', id: 123 }),
    output: JSON.stringify({ result: 'success', items: 42 })
  }
});
LogSpanToolContent
Tool execution span content.
Ask AI
/** Tool execution span content; payloads are JSON strings. */
interface LogSpanToolContent {
  type: 'Tool';                          // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
// Example: record a tool call (e.g. an LLM-invoked function) on a span.
span.update({
  content: {
    type: 'Tool',
    input: JSON.stringify({ function: 'get_weather', city: 'Paris' }),
    output: JSON.stringify({ temp: 24, conditions: 'sunny' })
  }
});
LogSpanGuardrailContent
Safety/compliance check span content.
Ask AI
/** Safety/compliance check span content; payloads are JSON strings. */
interface LogSpanGuardrailContent {
  type: 'Guardrail';                     // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
// Example: record a guardrail evaluation (input = text + checks run,
// output = verdict and per-check scores).
span.update({
  content: {
    type: 'Guardrail',
    input: JSON.stringify({ text: 'User input', checks: ['toxicity', 'pii'] }),
    output: JSON.stringify({ safe: true, scores: { toxicity: 0.05 } })
  }
});
LogSpanRetrievalContent
RAG/retrieval span content.
Ask AI
/** RAG/retrieval span content; payloads are JSON strings. */
interface LogSpanRetrievalContent {
  type: 'Retrieval';                     // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
// Example: record a vector-search step (query in, scored documents out).
span.update({
  content: {
    type: 'Retrieval',
    input: JSON.stringify({ query: 'What is AI?', topK: 5 }),
    output: JSON.stringify({ documents: [{ id: 'doc1', score: 0.95 }] })
  }
});
LogSpanOtherContent
Custom span content for operations that fit none of the other categories.
Ask AI
/** Catch-all span content for uncategorized operations; payloads are JSON strings. */
interface LogSpanOtherContent {
  type: 'Other';                         // discriminant for the LogSpanContent union
  input: LogSpanContentJson;
  output: LogSpanContentJson;
  expected?: LogSpanContentJson | null;  // reference output for evaluation
}
Copy
Ask AI
// Example: record an uncategorized operation on a span.
span.update({
  content: {
    type: 'Other',
    input: JSON.stringify({ custom: 'data' }),
    output: JSON.stringify({ result: 'output' })
  }
});
Supporting Types
LogSpanContentJson
A string that must contain valid JSON.
Ask AI
type LogSpanContentJson = string;
Copy
Ask AI
// Produce LogSpanContentJson via JSON.stringify so the string is
// guaranteed to be valid JSON.
const input: LogSpanContentJson = JSON.stringify({ query: 'test' });
const output: LogSpanContentJson = JSON.stringify({ result: 'success' });
LogAttributes
Key-value metadata for logs.
Ask AI
type LogAttributes = Record<string, string | number | boolean>;
Copy
Ask AI
// Example: mixed primitive attribute values attached to a trace.
const attributes: LogAttributes = {
  userId: 'user-123',
  latency: 1234,  // presumably milliseconds — confirm against your conventions
  cached: false,
  region: 'us-east-1',
  retryCount: 2
};
trace.update({ attributes });
LogTags
Array of string tags for categorization.
Ask AI
type LogTags = string[]; // each tag: 1-256 chars
Copy
Ask AI
// Example: tag a trace for later filtering/grouping.
const tags: LogTags = [
  'api',
  'production',
  'high-priority',
  'llm',
  'gpt-4o'
];
trace.update({ tags });
LogSpanParsedContent
Parsed token-usage metrics from LLM inference.
Ask AI
/**
 * Parsed token-usage metrics extracted from an LLM inference span.
 */
interface LogSpanParsedContent {
  type: 'Model';            // matches the Model span content discriminant
  promptTokens: number;     // minimum: 0
  completionTokens: number; // minimum: 0
  totalTokens: number;      // minimum: 0 — typically promptTokens + completionTokens
}
Copy
Ask AI
// Example: token usage for a single inference.
const usage: LogSpanParsedContent = {
  type: 'Model',
  promptTokens: 100,
  completionTokens: 50,
  totalTokens: 150  // 100 + 50
};
Complete Example
Copy
Ask AI
import { Adaline } from '@adaline/api';
import type { LogSpanContent, LogAttributes, LogTags } from '@adaline/api';
import OpenAI from 'openai';
// Client setup: one Adaline client, one OpenAI client, and a monitor
// scoped to a single project.
const adaline = new Adaline();
const openai = new OpenAI();
const monitor = adaline.initMonitor({ projectId: 'my-project' });
/**
 * Make one OpenAI chat call wrapped in an Adaline trace + span.
 *
 * On success the span records the Model content plus token-usage
 * attributes; on failure both span and trace are marked 'failure'
 * and the error is rethrown. Span and trace are ended on every path.
 *
 * @param userMessage - The user's chat message.
 * @returns The assistant's reply content from the first choice.
 * @throws Rethrows any error raised by the OpenAI call.
 */
async function trackedLLMCall(userMessage: string) {
  // One trace per request; the span covers just the OpenAI call.
  const trace = monitor.logTrace({
    name: 'Chat Request',
    tags: ['chat', 'production'],
    attributes: { messageLength: userMessage.length }
  });
  const span = trace.logSpan({
    name: 'OpenAI Call',
    tags: ['llm', 'openai', 'gpt-4o']
  });
  try {
    const response = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [{ role: 'user', content: userMessage }]
    });
    // Span payloads must be JSON strings (LogSpanContentJson).
    const content: LogSpanContent = {
      type: 'Model',
      provider: 'openai',
      model: 'gpt-4o',
      input: JSON.stringify([{ role: 'user', content: userMessage }]),
      output: JSON.stringify(response.choices[0].message),
      cost: calculateCost(response.usage)
    };
    // Token counts go on attributes so they are queryable metadata;
    // `usage` may be absent, so default each count to 0.
    const attributes: LogAttributes = {
      promptTokens: response.usage?.prompt_tokens || 0,
      completionTokens: response.usage?.completion_tokens || 0,
      totalTokens: response.usage?.total_tokens || 0
    };
    span.update({ status: 'success', content, attributes });
    trace.update({ status: 'success' });
    return response.choices[0].message.content;
  } catch (error) {
    // Mark both levels failed, then rethrow so callers still see the error.
    span.update({ status: 'failure' });
    trace.update({ status: 'failure' });
    throw error;
  } finally {
    // end() runs on success and failure alike.
    span.end();
    trace.end();
  }
}
/**
 * Estimate the cost of a GPT-4o call from its token usage.
 *
 * Pricing baked in: 0.005 per 1K prompt tokens and 0.015 per 1K
 * completion tokens — keep in sync with the provider's price sheet.
 *
 * @param usage - Token usage from the completion response; `null` or
 *   `undefined` (e.g. when the API omits usage) yields a cost of 0.
 * @returns Estimated cost, always >= 0.
 */
function calculateCost(
  usage: { prompt_tokens?: number; completion_tokens?: number } | null | undefined
): number {
  if (!usage) return 0;
  // Missing fields count as zero tokens instead of producing NaN.
  const promptTokens = usage.prompt_tokens ?? 0;
  const completionTokens = usage.completion_tokens ?? 0;
  return (promptTokens / 1000) * 0.005 + (completionTokens / 1000) * 0.015;
}