
Adaline Class

The Adaline class is the main entry point for the TypeScript SDK. It provides methods for fetching deployments and initializing monitors for observability.

Constructor

new Adaline(options?: AdalineOptions)

Parameters

options
object
Configuration options for the Adaline client

Example

import { Adaline } from '@adaline/client';

// Uses ADALINE_API_KEY environment variable
const adaline = new Adaline();

Methods

getDeployment()

Fetch a specific deployment by its ID.
async getDeployment(options: GetDeploymentOptions): Promise<Deployment>

Parameters

options
object
required

Returns

deployment
Deployment
The Deployment object. See Deployment Type for complete documentation.

Example

const deployment = await adaline.getDeployment({
  promptId: 'prompt_abc123',
  deploymentId: 'deploy_xyz789'
});

console.log(deployment.prompt.config.model); // 'gpt-4o'
console.log(deployment.prompt.messages);     // Array of messages
console.log(deployment.prompt.tools);        // Array of tools

getLatestDeployment()

Fetch the latest deployment for a prompt in a specific environment.
async getLatestDeployment(options: GetLatestDeploymentOptions): Promise<Deployment>

Parameters

options
object
required

Returns

deployment
Deployment
The latest Deployment object for the specified environment. See Deployment Type for complete documentation.

Example

const deployment = await adaline.getLatestDeployment({
  promptId: 'prompt_abc123',
  deploymentEnvironmentId: 'environment_abc123'
});

// Use Adaline Gateway to call the LLM with deployment config
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

const gateway = new Gateway();
const openaiProvider = new OpenAI();

const model = openaiProvider.chatModel({
  modelName: deployment.prompt.config.model,
  apiKey: process.env.OPENAI_API_KEY!
});

const response = await gateway.completeChat({
  model,
  config: deployment.prompt.config.settings,
  messages: deployment.prompt.messages,
  tools: deployment.prompt.tools
});

initLatestDeployment()

Initialize a cached latest deployment with automatic background refresh. This is the recommended approach for production applications.
async initLatestDeployment(options: InitLatestDeploymentOptions): Promise<DeploymentController>

Parameters

options
object
required

Returns

controller
DeploymentController
A controller object exposing the get, backgroundStatus, and stop methods; see the DeploymentController interface under Type Definitions for the full signatures.

Example

const controller = await adaline.initLatestDeployment({
  promptId: 'prompt_abc123',
  deploymentEnvironmentId: 'environment_abc123',
  refreshInterval: 60
});

// Get cached deployment (instant, no API call)
const deployment = await controller.get();

// Force fresh fetch (ignores cache, makes API call)
const fresh = await controller.get(true);

// Stop background refresh when done
controller.stop();

initMonitor()

Initialize a monitoring session for logging traces and spans.
initMonitor(options: InitMonitorOptions): Monitor

Parameters

options
object
required

Returns

monitor
Monitor
A Monitor instance for creating traces and spans. See Monitor Class for details.

Example

const monitor = adaline.initMonitor({
  projectId: 'proj_abc123'
});

const trace = monitor.logTrace({ name: 'User Request' });
trace.end();

Complete Example

Here’s a complete example showing all methods working together:
import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

const adaline = new Adaline({ debug: true });
const gateway = new Gateway();
const openaiProvider = new OpenAI();

// Initialize deployment controller
const deploymentController = await adaline.initLatestDeployment({
  promptId: 'chatbot-prompt',
  deploymentEnvironmentId: 'environment_abc123',
  refreshInterval: 60
});

// Initialize monitor
const monitor = adaline.initMonitor({
  projectId: 'chatbot-project',
  flushInterval: 5,
  maxBufferSize: 100
});

// Handle chat request
async function handleChat(userId: string, message: string) {
  // Get cached deployment (no API call)
  const deployment = await deploymentController.get();

  // Create trace for this conversation
  const trace = monitor.logTrace({
    name: 'Chat Turn',
    sessionId: userId,
    tags: ['chat', 'production'],
    attributes: { userId, messageLength: message.length }
  });

  // Log LLM call
  const span = trace.logSpan({
    name: 'LLM Completion',
    promptId: deployment.promptId,
    deploymentId: deployment.id,
    runEvaluation: true,
    tags: ['llm', deployment.prompt.config.providerName]
  });

  try {
    // Create model from deployment config
    const model = openaiProvider.chatModel({
      modelName: deployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Call LLM using Adaline Gateway
    const gatewayResponse = await gateway.completeChat({
      model,
      config: deployment.prompt.config.settings,
      messages: [
        ...deployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: message }]
        }
      ],
      tools: deployment.prompt.tools
    });

    const reply = gatewayResponse.response.messages[0].content[0].value;

    // Update span with success
    span.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: deployment.prompt.config.providerName,
        model: deployment.prompt.config.model,
        input: JSON.stringify(gatewayResponse.provider.request),
        output: JSON.stringify(gatewayResponse.provider.response)
      },
      attributes: {
        cached: gatewayResponse.cached,
      }
    });

    trace.update({ status: 'success' });
    return reply;

  } catch (error) {
    span.update({
      status: 'failure',
      attributes: {
        error: error instanceof Error ? error.message : String(error)
      }
    });
    trace.update({ status: 'failure' });
    throw error;

  } finally {
    span.end();
    trace.end();
  }
}

// Graceful shutdown
process.on('SIGTERM', async () => {
  await monitor.flush();
  monitor.stop();
  deploymentController.stop();
});

Type Definitions

interface Logger {
  debug(message: string, ...args: unknown[]): void;
  info(message: string, ...args: unknown[]): void;
  warn(message: string, ...args: unknown[]): void;
  error(message: string, ...args: unknown[]): void;
}

interface AdalineOptions {
  apiKey?: string;
  baseURL?: string;
  logger?: Logger;
  debug?: boolean;
}

interface GetDeploymentOptions {
  promptId: string;
  deploymentId: string;
}

interface GetLatestDeploymentOptions {
  promptId: string;
  deploymentEnvironmentId: string;
}

interface InitLatestDeploymentOptions {
  promptId: string;
  deploymentEnvironmentId: string;
  refreshInterval?: number;
  maxContinuousFailures?: number;
}

interface DeploymentController {
  get: (forceRefresh?: boolean) => Promise<Deployment | undefined>;
  backgroundStatus: () => BackgroundStatus;
  stop: () => void;
}

interface BackgroundStatus {
  stopped: boolean;
  consecutiveFailures: number;
  lastError: Error | null;
  lastRefreshed: Date;
}

interface InitMonitorOptions {
  projectId: string;
  flushInterval?: number;
  maxBufferSize?: number;
  defaultContent?: LogSpanContent;
}

Best Practices

Use Environment Variables

// API key is read from ADALINE_API_KEY by default
const adaline = new Adaline();

Initialize Once, Use Everywhere

let deploymentController: DeploymentController;
let monitor: Monitor;

async function initialize() {
  const adaline = new Adaline();

  deploymentController = await adaline.initLatestDeployment({
    promptId: process.env.PROMPT_ID!,
    deploymentEnvironmentId: process.env.ENVIRONMENT!
  });

  monitor = adaline.initMonitor({
    projectId: process.env.PROJECT_ID!
  });
}

export { deploymentController, monitor };

Handle Graceful Shutdown

async function gracefulShutdown() {
  await monitor.flush();
  monitor.stop();
  deploymentController.stop();
  process.exit(0);
}

process.on('SIGTERM', gracefulShutdown);
process.on('SIGINT', gracefulShutdown);