Skip to main content

Overview

Logging types enable comprehensive tracking of AI applications, from high-level traces to granular span content with various operation types.

Span Content Types

LogSpanContent (Union)

Polymorphic content type for different span categories.
/**
 * Discriminated union of all span content variants, tagged by `type`.
 * Narrow on the `type` field to access variant-specific members.
 */
type LogSpanContent =
  | LogSpanModelContent        // Non-streaming LLM inference
  | LogSpanModelStreamContent  // Streaming LLM inference
  | LogSpanEmbeddingsContent   // Embedding generation
  | LogSpanFunctionContent     // Custom function execution
  | LogSpanToolContent         // Tool execution
  | LogSpanGuardrailContent    // Safety/compliance check
  | LogSpanRetrievalContent    // RAG/vector retrieval
  | LogSpanOtherContent;       // Anything else
Type Narrowing:
function handleSpanContent(content: LogSpanContent) {
  switch (content.type) {
    case 'Model':
      console.log(`LLM: ${content.provider}/${content.model}, Cost: $${content.cost}`);
      break;
    case 'ModelStream':
      console.log(`Streaming: ${content.provider}`);
      break;
    case 'Embeddings':
      console.log('Embedding generation');
      break;
    case 'Tool':
      console.log('Tool execution');
      break;
    case 'Retrieval':
      console.log('Vector search');
      break;
    case 'Function':
      console.log('Custom function');
      break;
    case 'Guardrail':
      console.log('Safety check');
      break;
    case 'Other':
      console.log('Other operation');
      break;
  }
}

LogSpanModelContent

LLM inference span content.
/** Span content for a single (non-streaming) LLM inference call. */
interface LogSpanModelContent {
  type: 'Model';                   // Discriminant for the LogSpanContent union.
  provider: string;                // 1-512 chars, e.g. 'openai'
  model: string;                   // 1-512 chars, e.g. 'gpt-4o'
  variables?: Record<string, Variable> | null;  // Prompt variables for this call, e.g. { userName: 'John' }.
  cost?: number | null;            // minimum: 0 — presumably USD; confirm against API reference.
  input: LogSpanContentJson;       // JSON string — ideally the raw provider request payload.
  output: LogSpanContentJson;      // JSON string — ideally the raw provider response payload.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
The input and output fields must be valid JSON strings. For best results, pass the raw request/response payloads from the AI provider directly — Adaline will automatically parse and enrich spans with token usage, cost breakdowns, and other provider-specific metadata. See the Logs API Reference for full details on supported formats and examples.
Example:
// Record a completed (non-streaming) LLM call on an existing span.
// Note: input/output/expected are stringified JSON, not raw objects.
span.update({
  content: {
    type: 'Model',
    provider: 'openai',
    model: 'gpt-4o',
    variables: { userName: 'John' },
    cost: 0.002,
    input: JSON.stringify([{ role: 'user', content: 'Hello' }]),
    output: JSON.stringify({ role: 'assistant', content: 'Hi!' }),
    expected: JSON.stringify({ role: 'assistant', content: 'Hello!' })
  }
});

LogSpanModelStreamContent

Streaming LLM inference span content.
/** Span content for a streaming LLM inference call. */
interface LogSpanModelStreamContent {
  type: 'ModelStream';             // Discriminant for the LogSpanContent union.
  provider: string;                // e.g. 'anthropic'
  model: string;                   // e.g. 'claude-3-opus'
  variables?: Record<string, Variable> | null;  // Prompt variables for this call.
  cost?: number | null;            // minimum: 0
  input: LogSpanContentJson;       // JSON string — raw provider request payload.
  output: string;                  // Raw stream chunks concatenated as received (not required to be JSON).
  aggregateOutput: LogSpanContentJson;  // JSON string — the fully assembled response.
  expected?: LogSpanContentJson | null; // Optional expected output, as a JSON string.
}
The input and aggregateOutput fields must be valid JSON strings. Passing the raw provider payloads enables Adaline to automatically extract token usage and other metadata. See the Logs API Reference for supported formats.
Example:
// Accumulate the raw stream, then record both the raw chunks (`output`)
// and the assembled response (`aggregateOutput`) on the span.
let chunks = '';
for await (const chunk of stream) {
  chunks += chunk;
}

span.update({
  content: {
    type: 'ModelStream',
    provider: 'anthropic',
    model: 'claude-3-opus',
    input: JSON.stringify(messages),
    output: chunks,
    aggregateOutput: JSON.stringify({ role: 'assistant', content: fullResponse }),
    cost: 0.005
  }
});

LogSpanEmbeddingsContent

Embedding generation span content.
/** Span content for an embedding-generation operation. */
interface LogSpanEmbeddingsContent {
  type: 'Embeddings';              // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string describing the embedding request.
  output: LogSpanContentJson;      // JSON string describing the embedding result.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record an embedding-generation operation on the span.
span.update({
  content: {
    type: 'Embeddings',
    input: JSON.stringify({ texts: ['query'], model: 'text-embedding-3-large' }),
    output: JSON.stringify({ embeddings: [[0.1, 0.2, ...]], dimensions: 3072 })
  }
});

LogSpanFunctionContent

Custom function execution span content.
/** Span content for a custom function execution. */
interface LogSpanFunctionContent {
  type: 'Function';                // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string — the function's arguments/context.
  output: LogSpanContentJson;      // JSON string — the function's result.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record a custom function execution on the span.
span.update({
  content: {
    type: 'Function',
    input: JSON.stringify({ operation: 'process', id: 123 }),
    output: JSON.stringify({ result: 'success', items: 42 })
  }
});

LogSpanToolContent

Tool execution span content.
/** Span content for a tool (e.g. function-calling) execution. */
interface LogSpanToolContent {
  type: 'Tool';                    // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string — the tool invocation arguments.
  output: LogSpanContentJson;      // JSON string — the tool's result.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record a tool execution on the span.
span.update({
  content: {
    type: 'Tool',
    input: JSON.stringify({ function: 'get_weather', city: 'Paris' }),
    output: JSON.stringify({ temp: 24, conditions: 'sunny' })
  }
});

LogSpanGuardrailContent

Safety/compliance check span content.
/** Span content for a safety/compliance (guardrail) check. */
interface LogSpanGuardrailContent {
  type: 'Guardrail';               // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string — the text/content being checked.
  output: LogSpanContentJson;      // JSON string — the check verdict/scores.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record a guardrail check on the span.
span.update({
  content: {
    type: 'Guardrail',
    input: JSON.stringify({ text: 'User input', checks: ['toxicity', 'pii'] }),
    output: JSON.stringify({ safe: true, scores: { toxicity: 0.05 } })
  }
});

LogSpanRetrievalContent

RAG/retrieval span content.
/** Span content for a RAG/retrieval (vector search) operation. */
interface LogSpanRetrievalContent {
  type: 'Retrieval';               // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string — the retrieval query/parameters.
  output: LogSpanContentJson;      // JSON string — the retrieved documents/scores.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record a retrieval operation on the span.
span.update({
  content: {
    type: 'Retrieval',
    input: JSON.stringify({ query: 'What is AI?', topK: 5 }),
    output: JSON.stringify({ documents: [{id: 'doc1', score: 0.95}] })
  }
});

LogSpanOtherContent

Catch-all span content for custom operations that don't fit the other categories.
/** Catch-all span content for operations outside the other categories. */
interface LogSpanOtherContent {
  type: 'Other';                   // Discriminant for the LogSpanContent union.
  input: LogSpanContentJson;       // JSON string — arbitrary operation input.
  output: LogSpanContentJson;      // JSON string — arbitrary operation output.
  expected?: LogSpanContentJson | null;  // Optional expected output, as a JSON string.
}
Example:
// Record an uncategorized operation on the span.
span.update({
  content: {
    type: 'Other',
    input: JSON.stringify({ custom: 'data' }),
    output: JSON.stringify({ result: 'output' })
  }
});

Supporting Types

LogSpanContentJson

A string that must contain valid JSON. Although typed as string, the backend parses this value as JSON and will reject invalid payloads.
type LogSpanContentJson = string;
Always use JSON.stringify() (TypeScript) or json.dumps() (Python) to produce these values. Passing a non-JSON string will cause an API error.
Example:
// Always produce these values with JSON.stringify — never hand-build the string.
const input: LogSpanContentJson = JSON.stringify({ query: 'test' });
const output: LogSpanContentJson = JSON.stringify({ result: 'success' });

LogAttributes

Key-value metadata for logs.
type LogAttributes = Record<string, string | number | boolean>;
Example:
// Mixed value types are allowed: strings, numbers, and booleans.
const attributes: LogAttributes = {
  userId: 'user-123',
  latency: 1234,
  cached: false,
  region: 'us-east-1',
  retryCount: 2
};

trace.update({ attributes });

LogTags

Array of string tags for categorization.
type LogTags = string[]; // each tag: 1-256 chars
Example:
// Each tag must be 1-256 characters (see LogTags).
const tags: LogTags = [
  'api',
  'production',
  'high-priority',
  'llm',
  'gpt-4o'
];

trace.update({ tags });

LogSpanParsedContent

Parsed token usage metrics from LLM inference.
/** Token-usage metrics parsed from a 'Model' span's provider payloads. */
interface LogSpanParsedContent {
  type: 'Model';                   // Matches the span content variant the metrics came from.
  promptTokens: number;            // minimum: 0
  completionTokens: number;        // minimum: 0
  totalTokens: number;             // minimum: 0 — presumably promptTokens + completionTokens; confirm.
}
Example:
const usage: LogSpanParsedContent = {
  type: 'Model',
  promptTokens: 100,
  completionTokens: 50,
  totalTokens: 150
};

Complete Example

import { Adaline } from '@adaline/client';
import type { LogSpanContent } from '@adaline/api';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

// Client setup: Adaline for prompt/deployment APIs, Gateway + OpenAI
// provider for model calls, and a monitor scoped to a single project.
const adaline = new Adaline();
const gateway = new Gateway();
const openaiProvider = new OpenAI();
const monitor = adaline.initMonitor({ projectId: 'my-project' });

/**
 * Runs one chat completion against the latest deployed prompt and records
 * it as a trace containing a single LLM span.
 *
 * @param userMessage - The end user's chat message.
 * @returns The text value of the first content part of the first response
 *          message. NOTE(review): assumes the response always has at least
 *          one message with one content part — confirm or add a guard.
 * @throws Re-throws any gateway/provider error after marking the span and
 *         trace as failed.
 */
async function trackedLLMCall(userMessage: string) {
  // Resolve the currently deployed prompt version for this environment.
  const deployment = await adaline.getLatestDeployment({
    promptId: 'chat-prompt',
    deploymentEnvironmentId: 'environment_abc123'
  });

  // Open a trace for the whole request...
  const trace = monitor.logTrace({
    name: 'Chat Request',
    tags: ['chat', 'production'],
    attributes: { messageLength: userMessage.length }
  });

  // ...and a span for the LLM call, linked back to the deployed prompt.
  const span = trace.logSpan({
    name: 'LLM Call',
    promptId: deployment.promptId,
    deploymentId: deployment.id,
    tags: ['llm', deployment.prompt.config.providerName]
  });

  try {
    const model = openaiProvider.chatModel({
      modelName: deployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Deployed prompt messages come first; the user turn is appended.
    const gatewayResponse = await gateway.completeChat({
      model,
      config: deployment.prompt.config.settings,
      messages: [
        ...deployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: userMessage }]
        }
      ],
      tools: deployment.prompt.tools
    });

    // Log the raw provider request/response so Adaline can parse token
    // usage and cost automatically (see LogSpanModelContent notes).
    const content: LogSpanContent = {
      type: 'Model',
      provider: deployment.prompt.config.providerName,
      model: deployment.prompt.config.model,
      input: JSON.stringify(gatewayResponse.provider.request),
      output: JSON.stringify(gatewayResponse.provider.response)
    };

    span.update({ status: 'success', content });
    trace.update({ status: 'success' });

    return gatewayResponse.response.messages[0].content[0].value;

  } catch (error) {
    span.update({ status: 'failure' });
    trace.update({ status: 'failure' });
    throw error;

  } finally {
    // Span and trace are always closed, even on failure.
    span.end();
    trace.end();
  }
}