Skip to main content
  1. Sign up — Create an account at platform.respan.ai
  2. Create an API key — Generate one on the API keys page
  3. Add credits or a provider key — Add credits on the Credits page or connect your own provider key on the Integrations page
Add the Docs MCP to your AI coding tool to get help building with Respan. No API key needed.
{
  "mcpServers": {
    "respan-docs": {
      "url": "https://docs.respan.ai/mcp"
    }
  }
}

What is OpenAI Agents SDK?

The OpenAI Agents SDK is a lightweight framework for building multi-agent workflows with tools, handoffs, and guardrails. It provides a built-in tracing system that captures agent runs, LLM generations, tool calls, and agent-to-agent handoffs.

Setup

Configuration

Examples

Tool calls

Tool calls are automatically captured as spans with inputs, outputs, and timing.
from agents import Agent, Runner, set_trace_processors, function_tool
from respan_exporter_openai_agents import RespanTraceProcessor

# Route every Agents SDK trace (spans included) through Respan.
set_trace_processors([RespanTraceProcessor()])

# `function_tool` turns a plain function into a callable tool; the span
# for each invocation records its inputs, outputs, and timing.
@function_tool
def get_weather(city: str) -> str:
    return f"The weather in {city} is sunny, 72°F"

weather_agent = Agent(
    name="Weather Agent",
    instructions="Help users check the weather.",
    tools=[get_weather],
)

run_result = Runner.run_sync(weather_agent, "What's the weather in San Francisco?")
print(run_result.final_output)

Handoffs

Agent-to-agent handoffs are traced with full context.
from agents import Agent, Runner, set_trace_processors
from respan_exporter_openai_agents import RespanTraceProcessor

# Export traces — including agent-to-agent handoff spans — to Respan.
set_trace_processors([RespanTraceProcessor()])

# Specialist agent the front-line agent can transfer control to.
billing_agent = Agent(
    name="Billing Agent",
    instructions="Handle billing questions.",
)

# Front-line agent; listing `handoffs` lets it pass the conversation on.
triage_agent = Agent(
    name="Support Agent",
    instructions="Route billing questions to the billing agent.",
    handoffs=[billing_agent],
)

run_result = Runner.run_sync(triage_agent, "I have a billing question")
print(run_result.final_output)

Agents as tools

Instead of handoffs (where the new agent takes over), you can use agents as tools — the tool agent runs independently and returns its result to the caller.
from agents import Agent, Runner, set_trace_processors, trace
from respan_exporter_openai_agents import RespanTraceProcessor

set_trace_processors([RespanTraceProcessor()])

# Translator agents that will be exposed to the orchestrator as tools.
spanish_agent = Agent(
    name="spanish_agent",
    instructions="You translate the user's message to Spanish",
)
french_agent = Agent(
    name="french_agent",
    instructions="You translate the user's message to French",
)

# `as_tool` wraps an agent so it runs independently and returns its result
# to the caller — unlike a handoff, where the new agent takes over.
translation_tools = [
    spanish_agent.as_tool(
        tool_name="translate_to_spanish",
        tool_description="Translate to Spanish",
    ),
    french_agent.as_tool(
        tool_name="translate_to_french",
        tool_description="Translate to French",
    ),
]

orchestrator = Agent(
    name="orchestrator",
    instructions="You are a translation agent. Use the provided tools to translate.",
    tools=translation_tools,
)

# Group the whole multi-agent run under a single named trace.
with trace("Translation orchestrator"):
    run_result = Runner.run_sync(orchestrator, "Translate 'hello' to Spanish and French")
    print(run_result.final_output)

Guardrails

Run input guardrails in parallel with the agent to quickly reject invalid inputs.
import asyncio

from agents import (
    Agent, Runner, set_trace_processors,
    input_guardrail, GuardrailFunctionOutput,
    InputGuardrailTripwireTriggered, RunContextWrapper,
)
from pydantic import BaseModel
from respan_exporter_openai_agents import RespanTraceProcessor

set_trace_processors([RespanTraceProcessor()])

class MathHomeworkOutput(BaseModel):
    # Structured verdict produced by the guardrail agent.
    is_math_homework: bool
    reasoning: str

guardrail_agent = Agent(
    name="Guardrail check",
    instructions="Check if the user is asking you to do their math homework.",
    output_type=MathHomeworkOutput,
)

@input_guardrail
async def math_guardrail(context: RunContextWrapper, agent: Agent, input):
    # Run the guardrail agent on the raw input and trip the wire
    # whenever it classifies the request as math homework.
    result = await Runner.run(guardrail_agent, input, context=context.context)
    output = result.final_output_as(MathHomeworkOutput)
    return GuardrailFunctionOutput(
        output_info=output,
        tripwire_triggered=output.is_math_homework,
    )

agent = Agent(
    name="Support Agent",
    instructions="You are a customer support agent.",
    input_guardrails=[math_guardrail],
)

async def main() -> None:
    # FIX: `await` is only valid inside a coroutine — the original snippet
    # used it at module top level, which is a SyntaxError in a .py script.
    # The guardrail runs in parallel with the agent and raises before the
    # agent's answer is returned if the tripwire fires.
    try:
        result = await Runner.run(agent, "Solve for x: 2x + 5 = 11")
    except InputGuardrailTripwireTriggered:
        print("Guardrail triggered — input rejected")

asyncio.run(main())

Streaming

Stream agent responses with real-time text deltas.
import asyncio

from openai.types.responses import ResponseTextDeltaEvent
from agents import Agent, Runner

agent = Agent(name="Joker", instructions="You tell jokes.")

async def main() -> None:
    # FIX: `async for` is only valid inside a coroutine — the original
    # snippet used it at module top level, which is a SyntaxError in a
    # .py script. run_streamed returns immediately; consuming the event
    # stream prints text deltas as the model produces them.
    result = Runner.run_streamed(agent, input="Tell me 3 jokes.")
    async for event in result.stream_events():
        if event.type == "raw_response_event" and isinstance(
            event.data, ResponseTextDeltaEvent
        ):
            print(event.data.delta, end="", flush=True)

asyncio.run(main())

Parallelization

Run multiple agents concurrently and pick the best result.
import asyncio

from agents import Agent, Runner, ItemHelpers, trace

spanish_agent = Agent(name="translator", instructions="Translate to Spanish")
picker = Agent(name="picker", instructions="Pick the best translation")

async def main() -> None:
    # FIX: `await` is only valid inside a coroutine — the original snippet
    # awaited asyncio.gather at module top level, a SyntaxError in a .py
    # script.
    with trace("Parallel translation"):
        # Fire three independent runs concurrently; gather preserves order.
        res_1, res_2, res_3 = await asyncio.gather(
            Runner.run(spanish_agent, "Good morning"),
            Runner.run(spanish_agent, "Good morning"),
            Runner.run(spanish_agent, "Good morning"),
        )

        outputs = [ItemHelpers.text_message_outputs(r.new_items) for r in [res_1, res_2, res_3]]
        # chr(10) is "\n" — kept because backslashes were not allowed in
        # f-string expressions before Python 3.12.
        best = await Runner.run(picker, f"Translations:\n{chr(10).join(outputs)}")
        print(best.final_output)

asyncio.run(main())

Deterministic flows

Sequential agent pipeline with validation gates.
import asyncio

from pydantic import BaseModel
from agents import Agent, Runner, trace

outline_agent = Agent(name="outliner", instructions="Generate a story outline.")

class CheckResult(BaseModel):
    # Structured verdict from the checker agent (the validation gate).
    good_quality: bool
    is_scifi: bool

checker_agent = Agent(name="checker", instructions="Judge outline quality.", output_type=CheckResult)
writer_agent = Agent(name="writer", instructions="Write a short story from the outline.")

async def main() -> None:
    # FIX: `await` is only valid inside a coroutine — the original snippet
    # used it at module top level, which is a SyntaxError in a .py script.
    with trace("Story pipeline"):
        # Stage 1: draft an outline.
        outline = await Runner.run(outline_agent, "A robot discovers emotions")
        # Stage 2: validation gate — judge the outline before writing.
        check = await Runner.run(checker_agent, outline.final_output)

        # Stage 3: only write the story if the outline passed both checks.
        if check.final_output.good_quality and check.final_output.is_scifi:
            story = await Runner.run(writer_agent, outline.final_output)
            print(story.final_output)

asyncio.run(main())

Dynamic system prompts

Use a function for instructions to customize prompts based on runtime context.
import asyncio

from agents import Agent, Runner, RunContextWrapper, set_trace_processors
from respan_exporter_openai_agents import RespanTraceProcessor

set_trace_processors([RespanTraceProcessor()])

class CustomContext:
    """Run-scoped context carrying the desired response style."""

    def __init__(self, style: str):
        self.style = style

def custom_instructions(
    run_context: RunContextWrapper[CustomContext], agent: Agent
) -> str:
    """Resolve the system prompt at run time from the context's style."""
    if run_context.context.style == "haiku":
        return "Only respond in haikus."
    elif run_context.context.style == "pirate":
        return "Respond as a pirate."
    return "Respond normally."

# Passing a callable instead of a string makes the prompt dynamic.
agent = Agent(name="Chat agent", instructions=custom_instructions)

async def main() -> None:
    # FIX: `await` is only valid inside a coroutine — the original snippet
    # used it at module top level, which is a SyntaxError in a .py script.
    result = await Runner.run(
        agent, "Tell me a joke.", context=CustomContext(style="pirate")
    )
    print(result.final_output)

asyncio.run(main())
Looking for the gateway integration? See Gateway > OpenAI Agents SDK.