Skip to main content

Overview

The Aden Python SDK integrates seamlessly with popular AI frameworks. Because Aden instruments the underlying LLM SDKs (OpenAI, Anthropic, Gemini), any framework that uses these SDKs is automatically tracked.

PydanticAI

Type-safe agents with Pydantic models

PydanticAI

PydanticAI is a Python agent framework that uses OpenAI, Anthropic, and other LLM providers under the hood. Aden’s global instrumentation automatically captures all LLM calls made by PydanticAI agents.

Setup

from aden import instrument, uninstrument, MeterOptions, create_console_emitter
from pydantic_ai import Agent

# Instrument BEFORE creating agents
# instrument() patches the underlying LLM SDKs globally, so it must run
# before any Agent is constructed; pair it with uninstrument() on shutdown.
instrument(MeterOptions(
    emit_metric=create_console_emitter(pretty=True),  # pretty-print metrics to the console
    track_tool_calls=True,  # also record tool-call counts/names per LLM call
))

Basic Agent

from pydantic_ai import Agent

# Minimal PydanticAI agent; the "provider:model" string selects the
# underlying SDK (here OpenAI), which Aden has already instrumented.
agent = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a helpful assistant. Keep responses brief.",
)

# NOTE: top-level await — this snippet assumes an async context
# (e.g. inside an `async def` or a notebook cell).
result = await agent.run("What is the capital of France?")
print(result.output)
# Metrics automatically captured: latency, tokens, model, etc.

Structured Output

PydanticAI excels at structured output with Pydantic models:
from pydantic import BaseModel
from pydantic_ai import Agent

class TaskAnalysis(BaseModel):
    """Schema for the agent's structured output."""
    task: str
    complexity: str  # low, medium, high
    estimated_steps: int
    required_tools: list[str]
    recommendation: str

# output_type makes the agent return a TaskAnalysis instance instead of text.
agent = Agent(
    "openai:gpt-4o-mini",
    output_type=TaskAnalysis,
    system_prompt="Analyze the given task and provide a structured analysis.",
)

# NOTE: top-level await — assumes an async context.
result = await agent.run("Build a REST API for user authentication")
analysis = result.output  # a TaskAnalysis instance

print(f"Task: {analysis.task}")
print(f"Complexity: {analysis.complexity}")
print(f"Steps: {analysis.estimated_steps}")
print(f"Tools: {', '.join(analysis.required_tools)}")

Tool Calling

from pydantic_ai import Agent, RunContext

agent = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a helpful assistant with access to weather data.",
)

# @agent.tool registers the function so the model can call it; with
# track_tool_calls=True, Aden includes these calls in the emitted metrics.
@agent.tool
async def get_weather(ctx: RunContext[None], location: str) -> str:
    """Get current weather for a location."""
    # Your weather API implementation
    return f"Weather in {location}: 72°F, Sunny, Humidity: 45%"

@agent.tool
async def get_forecast(ctx: RunContext[None], location: str, days: int = 3) -> str:
    """Get weather forecast for a location."""
    return f"{days}-day forecast for {location}: Mostly sunny."

# NOTE: top-level await — assumes an async context.
result = await agent.run("What's the weather like in San Francisco?")
print(result.output)
# Tool calls tracked with tool_call_count and tool_names in metrics

Multi-Provider

PydanticAI supports multiple LLM providers, all tracked by Aden:
# OpenAI agent
openai_agent = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a creative writer. Be concise.",
)

# Anthropic agent
anthropic_agent = Agent(
    "anthropic:claude-3-5-haiku-latest",
    system_prompt="You are a technical analyst. Be precise.",
)

# Gemini agent
# NOTE(review): this model string has no "provider:" prefix, unlike the two
# above — confirm against current PydanticAI model-naming conventions.
gemini_agent = Agent(
    "gemini-1.5-flash",
    system_prompt="You are a helpful assistant.",
)

# All calls automatically tracked
# (top-level await — assumes an async context)
creative = await openai_agent.run("Describe a sunset in one sentence.")
technical = await anthropic_agent.run("Explain why the sky appears red during sunset.")

Streaming

agent = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a storyteller.",
)

# run_stream() returns an async context manager; stream_text() iterates the
# streamed text as it arrives. (Top-level await — assumes an async context.)
async with agent.run_stream("Tell me a very short story about a robot.") as result:
    async for text in result.stream_text():
        print(text, end="", flush=True)
# Streaming metrics captured with final token counts

Multi-Agent Workflow

# Research agent
researcher = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a researcher. Gather key facts briefly.",
)

# Writer agent
writer = Agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a writer. Create content from research notes.",
)

# Step 1: Research (top-level await — assumes an async context)
research_result = await researcher.run(
    "Research the key benefits of renewable energy. List 3 bullet points."
)
# → trace_id: "abc123", call_sequence: 1

# Step 2: Write based on research — the first agent's output is passed
# into the second agent's prompt as plain text.
write_result = await writer.run(
    f"Write a brief paragraph based on these notes:\n{research_result.output}"
)
# → trace_id: "abc123", call_sequence: 2, parent_span_id links to research call

print(write_result.output)

Dependency Injection

PydanticAI’s dependency injection is fully tracked:
from typing import Any
from pydantic_ai import Agent, RunContext

class UserContext:
    """Per-run dependencies injected into the agent via deps_type/deps."""

    def __init__(self, user_id: str, preferences: dict[str, Any]) -> None:
        self.user_id = user_id
        self.preferences = preferences

agent = Agent(
    "openai:gpt-4o-mini",
    deps_type=UserContext,  # declares the type of ctx.deps for this agent
    system_prompt="You are a personalized assistant.",
)

# Dynamic system prompt built at run time from the injected dependencies.
@agent.system_prompt
async def add_user_context(ctx: RunContext[UserContext]) -> str:
    """Build an extra system-prompt fragment from the user's preferences."""
    prefs = ctx.deps.preferences
    return f"User preferences: {prefs}. Tailor your response accordingly."

user_ctx = UserContext(
    user_id="user_123",
    preferences={"style": "casual", "detail_level": "brief"},
)

# deps= supplies the UserContext instance for this run.
# (Top-level await — assumes an async context.)
result = await agent.run(
    "Recommend a good book to read.",
    deps=user_ctx,
)

Complete Example

Here’s a complete example showing Aden with PydanticAI:
import asyncio
from pydantic import BaseModel
from pydantic_ai import Agent, RunContext
from aden import (
    instrument,
    uninstrument,
    MeterOptions,
    create_console_emitter,
    create_http_transport,
    create_multi_emitter,
)


class WeatherResponse(BaseModel):
    """Structured output schema for the weather agent."""
    location: str
    temperature: float
    conditions: str


async def main():
    """Run an instrumented PydanticAI agent end to end, then clean up."""
    # Initialize instrumentation (must happen before any Agent is created)
    instrument(MeterOptions(
        # Fan metrics out to both the console and an HTTP endpoint.
        emit_metric=create_multi_emitter([
            create_console_emitter(pretty=True),
            create_http_transport(
                api_url="https://api.yourcompany.com/v1/metrics",
                batch_size=50,  # number of metrics buffered per HTTP batch
            ),
        ]),
        track_tool_calls=True,
    ))

    try:
        # Create agent with structured output
        weather_agent = Agent(
            "openai:gpt-4o-mini",
            output_type=WeatherResponse,
            system_prompt="Provide weather information.",
        )

        @weather_agent.tool
        async def get_current_weather(
            ctx: RunContext[None],
            location: str
        ) -> str:
            """Get current weather for a location."""
            # Simulated API call
            return f"Current weather in {location}: 72°F, Sunny"

        # Run the agent
        result = await weather_agent.run(
            "What's the weather in San Francisco?"
        )

        print(f"Location: {result.output.location}")
        print(f"Temperature: {result.output.temperature}°F")
        print(f"Conditions: {result.output.conditions}")

    finally:
        # Always undo the global SDK patching, even if the run fails.
        uninstrument()


if __name__ == "__main__":
    asyncio.run(main())

Async Instrumentation

For async applications with a control server, use instrument_async():
import os

from aden import instrument_async, uninstrument_async, MeterOptions
from pydantic_ai import Agent  # was missing: Agent is used below

async def main():
    """Instrument asynchronously, run an agent, and always uninstrument."""
    # instrument_async() is the awaitable variant for apps with a control
    # server; its result reports which SDKs were patched.
    result = await instrument_async(MeterOptions(
        api_key=os.environ.get("ADEN_API_KEY"),  # `import os` was missing above
        track_tool_calls=True,
    ))
    print(f"Instrumented: openai={result.openai}, anthropic={result.anthropic}")

    try:
        # Your PydanticAI agents here
        agent = Agent("openai:gpt-4o-mini")
        await agent.run("Hello!")
    finally:
        # Mirror of instrument_async(): undo global SDK patching on shutdown.
        await uninstrument_async()

Framework Support

| Framework  | Status          | How it works                                 |
| ---------- | --------------- | -------------------------------------------- |
| PydanticAI | Fully supported | Instruments underlying OpenAI/Anthropic SDKs |
| LangChain  | Coming soon     | Will instrument via SDK patching             |
| LlamaIndex | Coming soon     | Will instrument via SDK patching             |
Any Python framework that uses the official OpenAI, Anthropic, or Google Generative AI SDKs will automatically work with Aden’s global instrumentation.

Cleanup

Always clean up instrumentation on shutdown:
from aden import uninstrument

# On application shutdown
uninstrument()

# Or use a try/finally pattern so cleanup runs even if the app code raises
try:
    # Your application code
    pass
finally:
    uninstrument()

Next Steps