# Example: automatic LLM-call tracing for an Agno agent with Fallom.
# Initialize Fallom FIRST — it must be initialized before importing Agno
# so the LLM provider calls get instrumented.
import fallom

fallom.init(api_key="your-api-key")

# Now import Agno and your LLM provider
from agno.agent import Agent
from agno.models.openai import OpenAIChat

# Unique identifier for this conversation/session.
# NOTE(review): original snippet used `session_id` without defining it —
# replace this placeholder with your real session identifier.
session_id = "your-session-id"

# Set session context for tracing
fallom.trace.set_session("my-agent", session_id)

# Create your Agno agent - LLM calls are automatically traced
agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    instructions="You are a helpful assistant.",
)

# All LLM calls within the agent are traced
response = agent.run("What is the capital of France?")
# Example: use a Fallom-assigned model for a session with an Agno agent.
# Initialize Fallom before importing Agno so LLM calls get instrumented.
import fallom
from fallom import models

fallom.init(api_key="your-api-key")

from agno.agent import Agent
from agno.models.openai import OpenAIChat

# Unique identifier for this conversation/session.
# NOTE(review): original snippet used `session_id` without defining it —
# replace this placeholder with your real session identifier.
session_id = "your-session-id"

# Get assigned model for this session; falls back to "gpt-4o" if no
# assignment exists.
model_id = models.get("agno-agent", session_id, fallback="gpt-4o")
fallom.trace.set_session("agno-agent", session_id)

# Use the assigned model with your agent
agent = Agent(
    model=OpenAIChat(id=model_id),
    instructions="You are a helpful assistant.",
)
response = agent.run("Explain quantum computing")