| 1 | import os |
| 2 | from dotenv import load_dotenv |
| 3 | from opentelemetry import trace |
| 4 | from opentelemetry.sdk.trace import ReadableSpan |
| 5 | from opentelemetry.sdk.trace.export import SpanProcessor |
| 6 | from openinference.instrumentation.agentspec import AgentSpecInstrumentor |
| 7 | from pyagentspec.adapters.langgraph import AgentSpecLoader |
| 8 | from pyagentspec.agent import Agent |
| 9 | from pyagentspec.llms import OpenAiConfig |
| 10 | from respan_instrumentation_openinference._translator import OpenInferenceTranslator |
| 11 | from respan_tracing import RespanTelemetry |
| 12 | |
load_dotenv(override=True)

RESPAN_API_KEY = os.getenv("RESPAN_API_KEY")
RESPAN_BASE_URL = (os.getenv("RESPAN_BASE_URL") or "https://api.respan.ai/api").rstrip("/")

# Fail fast with a clear message: assigning None into os.environ raises an
# opaque "TypeError: str expected", and a missing key would otherwise only
# surface much later at request time.
if not RESPAN_API_KEY:
    raise RuntimeError("RESPAN_API_KEY is not set; add it to the environment or your .env file.")

# Route OpenAI-compatible AgentSpec calls through the Respan gateway.
os.environ["OPENAI_API_KEY"] = RESPAN_API_KEY
os.environ["OPENAI_BASE_URL"] = RESPAN_BASE_URL

# Telemetry client for this example. Batching is disabled so spans export
# promptly in a short-lived script. instruments=set() presumably disables
# RespanTelemetry's own auto-instrumentation (AgentSpec is instrumented
# explicitly later in this script) — confirm against respan_tracing docs.
telemetry = RespanTelemetry(
    app_name="agentspec-openinference-example",
    api_key=RESPAN_API_KEY,
    is_batching_enabled=False,
    instruments=set(),
)
| 28 | |
class TranslatingProcessor(SpanProcessor):
    """Rewrite OpenInference span attributes in place, then delegate every
    processor lifecycle call to a wrapped inner processor."""

    def __init__(self, translator: OpenInferenceTranslator, inner: SpanProcessor):
        self._xlate = translator
        self._delegate = inner

    def on_start(self, span, parent_context=None):
        # Nothing to translate when a span starts; forward untouched.
        self._delegate.on_start(span, parent_context)

    def on_end(self, span: ReadableSpan):
        # Translate first so the wrapped exporter sees the rewritten attributes.
        self._xlate.on_end(span)
        self._delegate.on_end(span)

    def shutdown(self):
        self._delegate.shutdown()

    def force_flush(self, timeout_millis: int = 30000):
        return self._delegate.force_flush(timeout_millis)
| 48 | |
# AgentSpec maintains its own tracing runtime, so wrap the active processors
# directly instead of using OpenInferenceInstrumentor(...) or @workflow/@task.
tp = trace.get_tracer_provider()
asp = getattr(tp, "_active_span_processor", None)
if asp is None:
    # e.g. the provider is still the no-op default: there is nothing to wrap,
    # and dereferencing asp below would raise an opaque AttributeError.
    raise RuntimeError(
        "Tracer provider has no _active_span_processor; "
        "ensure RespanTelemetry configured an SDK TracerProvider first."
    )
original_processors = list(getattr(asp, "_span_processors", ()))

translator = OpenInferenceTranslator()
# NOTE(review): _active_span_processor/_span_processors are private
# opentelemetry-sdk fields; this wiring may break across SDK versions.
asp._span_processors = tuple(
    TranslatingProcessor(translator, proc) for proc in original_processors
)

instrumentor = AgentSpecInstrumentor()
instrumentor.instrument(tracer_provider=tp)
| 62 | |
# Declarative AgentSpec agent; the LangGraph adapter below compiles it into a
# runnable graph. The OpenAI config points at the gateway configured above.
llm_config = OpenAiConfig(
    name="respan-openai",
    model_id="gpt-4o-mini",
    api_key=RESPAN_API_KEY,
)
agent = Agent(
    name="haiku_assistant",
    description="A helpful assistant that writes haikus.",
    llm_config=llm_config,
    system_prompt="You are a helpful assistant. Respond only with a haiku.",
)

langgraph_agent = AgentSpecLoader().load_component(agent)
| 75 | |
# Run a single turn; always flush telemetry and detach the instrumentor,
# even when the invocation raises.
try:
    request = {
        "messages": [
            {"role": "user", "content": "Write a haiku about recursion in programming."}
        ]
    }
    response = langgraph_agent.invoke(input=request)
    final_message = response["messages"][-1]
    print(final_message.content)
finally:
    telemetry.flush()
    instrumentor.uninstrument()