Documentation Index Fetch the complete documentation index at: https://www.adaline.ai/docs/llms.txt
Use this file to discover all available pages before exploring further.
Variables on spans aren't limited to plain text. You can attach images, PDFs, and large text summaries as variable values, making them available in the Playground, in datasets, and for continuous evaluations.
URL vs base64
Attachments support two modes — URL and base64 — and the distinction matters:
| Mode | What happens | Usable in Playground & datasets |
| --- | --- | --- |
| URL | Adaline stores the link. The file must remain publicly accessible. | Only while the URL is live. |
| base64 | Adaline receives the raw data and hosts it in its own storage. | Yes — fully persisted and self-contained. |
Images
Attach images to spans as variables with modality: "image". The detail field controls resolution processing (auto, low, medium, high).
Base64
import { readFileSync } from "fs" ;
const imageBase64 = readFileSync ( "./product-screenshot.png" ). toString ( "base64" );
const span = trace . logSpan ({ name: "analyze-image" });
span . update ({
status: "success" ,
content: {
type: "Model" ,
provider: "openai" ,
model: "gpt-4o" ,
variables: {
product_image: {
name: "product_image" ,
value: {
modality: "image" ,
detail: "auto" ,
value: { type: "base64" , base64: imageBase64 , mediaType: "png" },
},
},
},
input: JSON . stringify ( params ),
output: JSON . stringify ( response ),
},
});
import base64
from adaline_api.models.log_span_content import LogSpanContent
from adaline_api.models.log_span_model_content import LogSpanModelContent
with open ( "./product-screenshot.png" , "rb" ) as f:
image_base64 = base64.b64encode(f.read()).decode( "utf-8" )
span = trace.log_span( name = "analyze-image" )
span.update({
"status" : "success" ,
"content" : LogSpanContent(
actual_instance = LogSpanModelContent(
type = "Model" ,
provider = "openai" ,
model = "gpt-4o" ,
variables = {
"product_image" : {
"modality" : "image" ,
"detail" : "auto" ,
"value" : { "type" : "base64" , "base64" : image_base64, "mediaType" : "png" },
},
},
input = json.dumps(params),
output = json.dumps(response),
)
),
})
curl -X POST https://api.adaline.ai/v2/logs/span \
-H "Authorization: Bearer $ADALINE_API_KEY " \
-H "Content-Type: application/json" \
-d '{
"projectId": "your-project-id",
"traceReferenceId": "your-trace-ref",
"span": {
"name": "analyze-image",
"status": "success",
"referenceId": "span-img-001",
"startedAt": 1700000000000,
"endedAt": 1700000002000,
"content": { "type": "Other", "input": "{}", "output": "{}" },
"variables": {
"product_image": {
"modality": "image",
"detail": "auto",
"value": { "type": "base64", "base64": "<base64-encoded-data>", "mediaType": "png" }
}
}
}
}'
Supported media types: png, jpeg, webp, gif.
URL
When the image is already hosted publicly, you can pass a URL. Adaline stores the link but does not download or host the file.
const span = trace . logSpan ({ name: "analyze-image" });
span . update ({
status: "success" ,
content: {
type: "Model" ,
provider: "openai" ,
model: "gpt-4o" ,
variables: {
product_image: {
name: "product_image" ,
value: {
modality: "image" ,
detail: "auto" ,
value: { type: "url" , url: "https://cdn.example.com/product.png" },
},
},
},
input: JSON . stringify ( params ),
output: JSON . stringify ( response ),
},
});
span = trace.log_span( name = "analyze-image" )
span.update({
"status" : "success" ,
"content" : LogSpanContent(
actual_instance = LogSpanModelContent(
type = "Model" ,
provider = "openai" ,
model = "gpt-4o" ,
variables = {
"product_image" : {
"modality" : "image" ,
"detail" : "auto" ,
"value" : { "type" : "url" , "url" : "https://cdn.example.com/product.png" },
},
},
input = json.dumps(params),
output = json.dumps(response),
)
),
})
PDFs
Attach PDF documents with modality: "pdf". PDFs include a file object with metadata (name, id, and optionally size).
Base64
import { readFileSync } from "fs" ;
const pdfBase64 = readFileSync ( "./invoice.pdf" ). toString ( "base64" );
const span = trace . logSpan ({ name: "process-document" });
span . update ({
status: "success" ,
content: {
type: "Model" ,
provider: "openai" ,
model: "gpt-4o" ,
variables: {
invoice: {
name: "invoice" ,
value: {
modality: "pdf" ,
value: { type: "base64" , base64: pdfBase64 },
file: { name: "invoice.pdf" , id: "doc-001" },
},
},
},
input: JSON . stringify ( params ),
output: JSON . stringify ( response ),
},
});
import base64
with open ( "./invoice.pdf" , "rb" ) as f:
pdf_base64 = base64.b64encode(f.read()).decode( "utf-8" )
span = trace.log_span( name = "process-document" )
span.update({
"status" : "success" ,
"content" : LogSpanContent(
actual_instance = LogSpanModelContent(
type = "Model" ,
provider = "openai" ,
model = "gpt-4o" ,
variables = {
"invoice" : {
"modality" : "pdf" ,
"value" : { "type" : "base64" , "base64" : pdf_base64},
"file" : { "name" : "invoice.pdf" , "id" : "doc-001" },
},
},
input = json.dumps(params),
output = json.dumps(response),
)
),
})
curl -X POST https://api.adaline.ai/v2/logs/span \
-H "Authorization: Bearer $ADALINE_API_KEY " \
-H "Content-Type: application/json" \
-d '{
"projectId": "your-project-id",
"traceReferenceId": "your-trace-ref",
"span": {
"name": "process-document",
"status": "success",
"referenceId": "span-pdf-001",
"startedAt": 1700000000000,
"endedAt": 1700000003000,
"content": { "type": "Other", "input": "{}", "output": "{}" },
"variables": {
"invoice": {
"modality": "pdf",
"value": { "type": "base64", "base64": "<base64-encoded-data>" },
"file": { "name": "invoice.pdf", "id": "doc-001" }
}
}
}
}'
URL
const span = trace . logSpan ({ name: "process-document" });
span . update ({
status: "success" ,
content: {
type: "Model" ,
provider: "openai" ,
model: "gpt-4o" ,
variables: {
invoice: {
name: "invoice" ,
value: {
modality: "pdf" ,
value: { type: "url" , url: "https://cdn.example.com/invoice.pdf" },
file: { name: "invoice.pdf" , id: "doc-001" },
},
},
},
input: JSON . stringify ( params ),
output: JSON . stringify ( response ),
},
});
span = trace.log_span( name = "process-document" )
span.update({
"status" : "success" ,
"content" : LogSpanContent(
actual_instance = LogSpanModelContent(
type = "Model" ,
provider = "openai" ,
model = "gpt-4o" ,
variables = {
"invoice" : {
"modality" : "pdf" ,
"value" : { "type" : "url" , "url" : "https://cdn.example.com/invoice.pdf" },
"file" : { "name" : "invoice.pdf" , "id" : "doc-001" },
},
},
input = json.dumps(params),
output = json.dumps(response),
)
),
})
Text
For large text — retrieved context, summaries, full documents — use text variables or attributes. Variables are the better choice when the text needs to flow into datasets and evaluations. Attributes work well for shorter metadata you want to filter and search by in the Monitor.
Via variables
const span = trace . logSpan ({ name: "rag-response" });
span . update ({
status: "success" ,
content: {
type: "Model" ,
provider: "openai" ,
model: "gpt-4o" ,
variables: {
user_question: { name: "user_question" , value: { modality: "text" , value: userQuery } },
retrieved_context: { name: "retrieved_context" , value: { modality: "text" , value: longContextString } },
system_prompt: { name: "system_prompt" , value: { modality: "text" , value: systemPromptText } },
},
input: JSON . stringify ( params ),
output: JSON . stringify ( response ),
},
});
span = trace.log_span( name = "rag-response" )
span.update({
"status" : "success" ,
"content" : LogSpanContent(
actual_instance = LogSpanModelContent(
type = "Model" ,
provider = "openai" ,
model = "gpt-4o" ,
variables = {
"user_question" : { "modality" : "text" , "value" : user_query},
"retrieved_context" : { "modality" : "text" , "value" : long_context_string},
"system_prompt" : { "modality" : "text" , "value" : system_prompt_text},
},
input = json.dumps(params),
output = json.dumps(response),
)
),
})
Via attributes
const span = trace . logSpan ({
name: "rag-response" ,
attributes: {
user_question: userQuery ,
retrieved_context: longContextString ,
summary: documentSummary ,
},
});
span = trace.log_span(
name = "rag-response" ,
attributes = {
"user_question" : user_query,
"retrieved_context" : long_context_string,
"summary" : document_summary,
},
)
When using the Proxy, pass variables (including attachments) via the adaline-span-variables header:
headers[ "adaline-span-variables" ] = json.dumps({
"product_image" : {
"modality" : "image" ,
"detail" : "auto" ,
"value" : { "type" : "base64" , "base64" : image_base64, "mediaType" : "png" },
},
"user_question" : {
"modality" : "text" ,
"value" : "What color options are available?" ,
},
})
Limits
| Attachment type | Max size |
| --- | --- |
| Image (base64) | 10 MB |
| PDF (base64) | 10 MB |
| Request body (total) | 32 MB |
See Limits for full payload constraints. Requests exceeding these limits receive a 413 Payload Too Large response.
If your files exceed these limits, you can either self-host the media and use URL referencing instead of base64, or contact support@adaline.ai to discuss higher limits for your workspace.
Next steps
Analyze Log Traces Filter and search traces using your attached data.
Build Datasets from Logs Capture variable-enriched spans into evaluation datasets.