Skip to main content

TypeScript SDK

The TypeScript SDK provides a complete toolkit for integrating Adaline’s LLM deployment and observability features into your AI applications.

Installation

npm install @adaline/client @adaline/api

Quick Start

import { Adaline } from '@adaline/client';

// Initialize client — the API key is read from the ADALINE_API_KEY
// environment variable, so no constructor argument is needed.
const adaline = new Adaline();

// Fetch the latest deployment of a prompt for a given environment.
const deployment = await adaline.getLatestDeployment({
  promptId: 'your-prompt-id',
  deploymentEnvironmentId: 'your-deployment-environment-id'
});

// Initialize monitoring, scoped to a project.
const monitor = adaline.initMonitor({
  projectId: 'your-project-id'
});

// Create a trace for the overall request, then a span for the LLM call.
const trace = monitor.logTrace({
  name: 'User Request',
  sessionId: 'session-123'
});

const span = trace.logSpan({
  name: 'LLM Call',
  promptId: deployment.promptId,
  deploymentId: deployment.id
});

// Do LLM call using the 'deployment'; the result is assumed to be
// available as 'response' below.

// Update the span with the response
span.update({
  status: 'success',
  content: {
    type: 'Model',
    provider: deployment.prompt.config.providerName,
    model: deployment.prompt.config.model,
    input: JSON.stringify(deployment.prompt.messages),
    output: JSON.stringify(response)
  }
});

// End the span before ending the trace. (Fix: the original snippet never
// called span.end(), unlike every span in the Real-World Examples.)
span.end();

trace.end();

Type Definitions

The SDK includes full TypeScript type definitions. Most types are imported from @adaline/api:
// API types (deployment, messages, content, logging)
import type {
  Deployment,
  PromptSnapshot,
  PromptMessage,
  MessageContent,
  LogSpanContent,
  CreateLogTraceRequest,
  CreateLogSpanRequest
} from '@adaline/api';

// SDK-specific types
import type {
  TraceStatus,
  SpanStatus,
  BufferedEntry
} from '@adaline/client';
See Types Reference for complete type documentation.

Error Handling

The SDK uses automatic retry logic with exponential backoff:
  • 5xx errors: Automatically retried up to 10 times within a 20s budget
  • 4xx errors: Fail immediately (no retry)
  • Network errors: Retried with exponential backoff
Failed flush entries are dropped and counted via monitor.droppedCount. Successfully sent entries are tracked via monitor.sentCount.

Real-World Examples

Example 1: RAG Pipeline

import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI as OpenAIProvider } from '@adaline/openai';
import OpenAI from 'openai';
import { ChromaClient } from 'chromadb';

// Shared clients for this example:
// - adaline: SDK client (API key read from the ADALINE_API_KEY env var)
// - gateway + openaiProvider: Adaline gateway used for the chat completion
// - openai: raw OpenAI client, used here only for embeddings
// - chroma: vector store queried during retrieval
const adaline = new Adaline();
const gateway = new Gateway();
const openaiProvider = new OpenAIProvider();
const openai = new OpenAI();
const chroma = new ChromaClient();

// Monitor scoped to the 'rag-system' project; all traces below attach to it.
const monitor = adaline.initMonitor({ projectId: 'rag-system' });

/**
 * Answers a question via a three-stage RAG pipeline — query embedding,
 * vector retrieval, then LLM answer generation — logging each stage as a
 * span on a single Adaline trace.
 *
 * @param sessionId - Session identifier attached to the trace for grouping.
 * @param question - The user's natural-language question.
 * @returns The generated answer (first content part of the first response
 *          message from the gateway).
 * @throws Re-throws any error after marking the trace status as 'failure';
 *         the trace is always ended in the finally block.
 */
async function answerQuestion(sessionId: string, question: string) {
  // One trace per question; tags enable filtering in the dashboard.
  const trace = monitor.logTrace({
    name: 'RAG Query',
    sessionId,
    tags: ['rag', 'qa']
  });

  try {
    // Step 1: Generate embedding
    const embedSpan = trace.logSpan({
      name: 'Generate Query Embedding',
      tags: ['embedding']
    });

    const embeddingRequest = {
      model: 'text-embedding-3-large',
      input: question
    };

    const embeddingResponse = await openai.embeddings.create(embeddingRequest);

    // Single input string → single embedding at data[0].
    const embedding = embeddingResponse.data[0].embedding;

    // Record the raw request/response on the span as an 'Embeddings' entry.
    embedSpan.update({
      status: 'success',
      content: {
        type: 'Embeddings',
        input: JSON.stringify(embeddingRequest),
        output: JSON.stringify(embeddingResponse)
      }
    });
    embedSpan.end();

    // Step 2: Retrieve documents
    const retrievalSpan = trace.logSpan({
      name: 'Vector Search',
      tags: ['retrieval']
    });

    const collection = await chroma.getCollection({ name: 'docs' });
    // Top-5 nearest neighbours for the single query embedding.
    const results = await collection.query({
      queryEmbeddings: [embedding],
      nResults: 5
    });

    // Only ids and distances are logged, not document bodies.
    retrievalSpan.update({
      status: 'success',
      content: {
        type: 'Retrieval',
        input: JSON.stringify({ query: question, topK: 5 }),
        output: JSON.stringify({
          documentIds: results.ids[0],
          scores: results.distances?.[0]
        })
      }
    });
    retrievalSpan.end();

    // Step 3: Get LLM deployment
    const deployment = await adaline.getLatestDeployment({
      promptId: 'rag-answer-prompt',
      deploymentEnvironmentId: 'environment_abc123'
    });

    // Step 4: Generate answer
    // NOTE(review): runEvaluation presumably triggers configured evaluations
    // for this span — confirm against the SDK reference.
    const llmSpan = trace.logSpan({
      name: 'Generate Answer',
      promptId: deployment.promptId,
      deploymentId: deployment.id,
      runEvaluation: true,
      tags: ['llm', 'answer']
    });

    // NOTE(review): chroma 'documents' entries can be null in the JS client
    // typings — confirm; nulls would be stringified into the context.
    const context = results.documents[0].join('\n\n');

    const model = openaiProvider.chatModel({
      modelName: deployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Deployed prompt messages come first; the retrieved context and the
    // user question are appended as a final user message.
    const gatewayResponse = await gateway.completeChat({
      model,
      config: deployment.prompt.config.settings,
      messages: [
        ...deployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: `Context:\n${context}\n\nQuestion: ${question}` }]
        }
      ],
      tools: deployment.prompt.tools
    });

    const answer = gatewayResponse.response.messages[0].content[0].value;

    // Log the provider-level request/response (not the gateway wrapper).
    llmSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: deployment.prompt.config.providerName,
        model: deployment.prompt.config.model,
        input: JSON.stringify(gatewayResponse.provider.request),
        output: JSON.stringify(gatewayResponse.provider.response)
      },
      attributes: {
        contextDocs: results.ids[0].length
      }
    });
    llmSpan.end();

    trace.update({ status: 'success' });

    return answer;

  } catch (error) {
    // NOTE(review): any span still open when an error is thrown is never
    // ended here — confirm whether trace.end() closes open spans.
    trace.update({ status: 'failure', attributes: { error: String(error) } });
    throw error;
  } finally {
    // Always end the trace so buffered entries are flushed.
    trace.end();
  }
}

Example 2: Multi-Agent System

import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

// Shared clients for this example: SDK client (API key from the
// ADALINE_API_KEY env var), gateway + OpenAI provider for both agent
// calls, and a monitor scoped to the 'multi-agent' project.
const adaline = new Adaline();
const gateway = new Gateway();
const openaiProvider = new OpenAI();
const monitor = adaline.initMonitor({ projectId: 'multi-agent' });

/**
 * Routes a task to a specialist agent chosen by a router agent, logging
 * both LLM calls as spans on a single Adaline trace.
 *
 * @param userId - Used as the trace sessionId to group a user's requests.
 * @param task - Free-form task description passed to both agents.
 * @returns The specialist agent's response (first content part of the
 *          first response message).
 * @throws Re-throws any error after marking the trace status as 'failure';
 *         the trace is always ended in the finally block.
 */
async function processRequest(userId: string, task: string) {
  const trace = monitor.logTrace({
    name: 'Multi-Agent Task',
    sessionId: userId,
    tags: ['agents', 'orchestration']
  });

  try {
    // Router agent decides which specialist to use
    const routerSpan = trace.logSpan({
      name: 'Router Agent',
      tags: ['agent', 'router']
    });

    const routerDeployment = await adaline.getLatestDeployment({
      promptId: 'router-agent-prompt',
      deploymentEnvironmentId: 'environment_abc123'
    });

    const routerModel = openaiProvider.chatModel({
      modelName: routerDeployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Deployed router prompt plus the task as the final user message.
    const routerGatewayResponse = await gateway.completeChat({
      model: routerModel,
      config: routerDeployment.prompt.config.settings,
      messages: [
        ...routerDeployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: task }]
        }
      ],
      tools: routerDeployment.prompt.tools
    });

    // NOTE(review): the '|| '{}'' fallback only covers a falsy value —
    // non-JSON router output still makes JSON.parse throw, and a missing
    // 'agent' key yields undefined (→ prompt id 'undefined-agent-prompt').
    // Confirm the router prompt constrains its output to JSON.
    const selectedAgent = JSON.parse(
      String(routerGatewayResponse.response.messages[0].content[0].value) || '{}'
    ).agent;

    // Log the provider-level request/response plus the routing decision.
    routerSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: routerDeployment.prompt.config.providerName,
        model: routerDeployment.prompt.config.model,
        input: JSON.stringify(routerGatewayResponse.provider.request),
        output: JSON.stringify(routerGatewayResponse.provider.response)
      },
      attributes: { selectedAgent }
    });
    routerSpan.end();

    // Execute specialist agent
    const specialistSpan = trace.logSpan({
      name: `${selectedAgent} Agent`,
      tags: ['agent', 'specialist', selectedAgent]
    });

    // Specialist prompts follow the '<agent>-agent-prompt' naming scheme.
    const specialistDeployment = await adaline.getLatestDeployment({
      promptId: `${selectedAgent}-agent-prompt`,
      deploymentEnvironmentId: 'environment_abc123'
    });

    const specialistModel = openaiProvider.chatModel({
      modelName: specialistDeployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Same task text is forwarded verbatim to the specialist.
    const specialistGatewayResponse = await gateway.completeChat({
      model: specialistModel,
      config: specialistDeployment.prompt.config.settings,
      messages: [
        ...specialistDeployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: task }]
        }
      ],
      tools: specialistDeployment.prompt.tools
    });

    specialistSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: specialistDeployment.prompt.config.providerName,
        model: specialistDeployment.prompt.config.model,
        input: JSON.stringify(specialistGatewayResponse.provider.request),
        output: JSON.stringify(specialistGatewayResponse.provider.response)
      }
    });
    specialistSpan.end();

    trace.update({ status: 'success', attributes: { agent: selectedAgent } });

    return specialistGatewayResponse.response.messages[0].content[0].value;

  } catch (error) {
    // NOTE(review): a span still open when an error is thrown is never
    // ended here — confirm whether trace.end() closes open spans.
    trace.update({ status: 'failure', attributes: { error: String(error) } });
    throw error;
  } finally {
    // Always end the trace so buffered entries are flushed.
    trace.end();
  }
}