Basic Setup

1. Install dependencies

pip install "aden[openai]"

2. Instrument at startup

Add instrumentation before creating any LLM clients:
from aden import instrument, MeterOptions, create_console_emitter

instrument(MeterOptions(
    emit_metric=create_console_emitter(pretty=True),
))
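
The console emitter is just one option. Judging from the factory helpers used throughout this guide, emit_metric appears to accept any callable that receives a metric object, so you can supply your own — a minimal sketch (the print pipeline is illustrative):
from aden import instrument, MeterOptions

def log_metric(metric):
    # Forward to your own logging or metrics pipeline
    print("LLM call metered:", metric)

instrument(MeterOptions(
    emit_metric=log_metric,
))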

3. Use your SDK normally

from openai import OpenAI

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is 2+2?"}],
)
print(response.choices[0].message.content)

4. Clean up on shutdown

from aden import uninstrument

# In your shutdown handler
uninstrument()
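
If your application has no explicit shutdown hook, one option is to register cleanup with atexit — a minimal sketch using only the documented uninstrument:
import atexit

from aden import uninstrument

# Remove instrumentation automatically when the process exits
atexit.register(uninstrument)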

Multi-Provider Example

Instrument all providers at once:
import os

from aden import instrument, MeterOptions, create_console_emitter
from openai import OpenAI
from anthropic import Anthropic
import google.generativeai as genai

instrument(MeterOptions(
    emit_metric=create_console_emitter(pretty=True),
))

# All providers are now instrumented
openai_client = OpenAI()
anthropic_client = Anthropic()
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
gemini_model = genai.GenerativeModel("gemini-pro")
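
With instrumentation in place, a call through any of these clients should emit a metric. A quick smoke test (model names and prompts are illustrative):
# One call per provider; each emits its own metric
openai_client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "ping"}],
)
anthropic_client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=16,
    messages=[{"role": "user", "content": "ping"}],
)
gemini_model.generate_content("ping")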

Async Support

Use instrument_async for async applications:
import asyncio
from aden import instrument_async, MeterOptions, create_console_emitter
from openai import AsyncOpenAI

async def main():
    await instrument_async(MeterOptions(
        emit_metric=create_console_emitter(pretty=True),
    ))

    client = AsyncOpenAI()
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
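
Concurrent requests work the same way; assuming the async instrumentation is task-safe, each call below is metered independently. A sketch to drop inside main() above:
async def ask(client, prompt):
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

# Two concurrent completions; each emits its own metric
answers = await asyncio.gather(ask(client, "One"), ask(client, "Two"))
print(answers)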

Streaming Support

Streaming calls are fully supported. Metrics are emitted when the stream completes:
client = OpenAI()
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a haiku"}],
    stream=True,
)

for chunk in stream:
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="", flush=True)
# Metrics emitted here after stream completes
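
Async streaming follows the same pattern; assuming instrument_async wraps async streams the same way, metrics are again emitted once the stream is exhausted. A sketch:
# Inside an async function, after await instrument_async(...)
client = AsyncOpenAI()
stream = await client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a haiku"}],
    stream=True,
)
async for chunk in stream:
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="", flush=True)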

Tool Calls

Tool calls are automatically tracked:
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                },
            },
        },
    }
]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=tools,
)

# Metric includes:
# tool_calls: [{"type": "function", "name": "get_weather"}]
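
The metric mirrors what the SDK itself returns, so you can cross-check it against the response object (OpenAI SDK shown):
# tool_calls is None when the model made no tool calls
message = response.choices[0].message
for call in message.tool_calls or []:
    print(call.function.name, call.function.arguments)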

Production Setup

For production, connect to the Aden control server:
import os
from aden import instrument, MeterOptions

instrument(MeterOptions(
    api_key=os.environ["ADEN_API_KEY"],
    server_url=os.environ.get("ADEN_API_URL"),

    # Track usage per user for budgets
    get_context_id=lambda: get_current_user_id(),

    # Handle alerts
    on_alert=lambda alert: print(f"[{alert.level}] {alert.message}"),
))
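
get_current_user_id is your own function. In a web service, one common pattern is a contextvars-based helper that your auth middleware sets per request — a sketch (names are hypothetical):
import contextvars

# Set by your auth middleware at the start of each request
_current_user: contextvars.ContextVar[str] = contextvars.ContextVar(
    "current_user", default="anonymous"
)

def get_current_user_id() -> str:
    return _current_user.get()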

Complete Example

import os
from aden import (
    instrument,
    uninstrument,
    MeterOptions,
    create_console_emitter,
    create_batch_emitter,
    create_multi_emitter,
)
from openai import OpenAI


def main():
    # Set up multiple emitters
    emitter = create_multi_emitter([
        # Log to console in development
        create_console_emitter(pretty=True),
        # Batch metrics for efficient network I/O
        create_batch_emitter(
            handler=lambda batch: send_to_backend(batch),
            batch_size=50,
            flush_interval_ms=5000,
        ),
    ])

    instrument(MeterOptions(
        emit_metric=emitter,
        track_tool_calls=True,
    ))

    client = OpenAI()

    # Make API calls...
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello, world!"}],
    )

    print(response.choices[0].message.content)

    # Clean up
    uninstrument()


def send_to_backend(batch):
    import requests
    requests.post(
        "https://your-backend.com/metrics",
        json=batch,
        headers={"X-API-Key": os.environ["METRICS_API_KEY"]},
        timeout=10,  # avoid blocking indefinitely on a slow backend
    )


if __name__ == "__main__":
    main()

Next Steps