LangChain Integration
Fallom integrates with LangChain to trace all your chain and agent LLM calls automatically.
Get your API key from the dashboard.

Installation

pip install fallom langchain langchain-openai opentelemetry-instrumentation-openai

Quick Start

# Initialize Fallom FIRST, before importing LangChain, so its
# OpenTelemetry instrumentation can hook the LLM client libraries.
import fallom
fallom.init(api_key="your-api-key")

# Now import LangChain
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# A stable identifier for this user/conversation; traces with the same
# session_id are grouped together in the dashboard.
session_id = "user-session-123"  # replace with your own session identifier

# Set session context for tracing
fallom.trace.set_session("langchain-app", session_id)

# Create your LangChain components - LLM calls are automatically traced
llm = ChatOpenAI(model="gpt-4o")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])

chain = prompt | llm

# All LLM calls in the chain are traced
response = chain.invoke({"input": "What is the capital of France?"})

Model A/B Testing with LangChain

Test different models in your LangChain applications:
# Initialize Fallom FIRST so instrumentation is active before LangChain loads.
import fallom
from fallom import models

fallom.init(api_key="your-api-key")

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# A stable identifier for this user/conversation; the same session_id is
# used for both the model assignment and the trace context below.
session_id = "user-session-123"  # replace with your own session identifier

# Get assigned model for this session (falls back to gpt-4o if no
# experiment assignment exists)
model_id = models.get("langchain-app", session_id, fallback="gpt-4o")

fallom.trace.set_session("langchain-app", session_id)

# Use the assigned model
llm = ChatOpenAI(model=model_id)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])

chain = prompt | llm
response = chain.invoke({"input": "Summarize this document"})

LangChain Agents with Fallom

Trace your LangChain agents:
# Initialize Fallom FIRST so instrumentation is active before LangChain loads.
import fallom
fallom.init(api_key="your-api-key")

from langchain import hub
from langchain_openai import ChatOpenAI
from langchain.agents import create_react_agent, AgentExecutor
from langchain_community.tools import DuckDuckGoSearchRun

# A stable identifier for this user/conversation.
session_id = "user-session-123"  # replace with your own session identifier

fallom.trace.set_session("langchain-agent", session_id)

llm = ChatOpenAI(model="gpt-4o")
tools = [DuckDuckGoSearchRun()]

# Pull the standard ReAct prompt (provides the required {tools},
# {tool_names}, {input} and {agent_scratchpad} variables).
prompt = hub.pull("hwchase17/react")

# Create and run the agent - all LLM calls are traced
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

response = agent_executor.invoke({"input": "What's the latest news about AI?"})

Prompt Management with LangChain

Use Fallom’s managed prompts with LangChain:
# Initialize Fallom FIRST so instrumentation is active before LangChain loads.
import fallom
from fallom import prompts

fallom.init(api_key="your-api-key")

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Get managed prompt; `variables` values are substituted into the
# prompt template stored in Fallom.
prompt_config = prompts.get("assistant-prompt", variables={
    "persona": "helpful assistant"
})

# A stable identifier for this user/conversation.
session_id = "user-session-123"  # replace with your own session identifier

fallom.trace.set_session("langchain-app", session_id)

llm = ChatOpenAI(model="gpt-4o")
prompt = ChatPromptTemplate.from_messages([
    # Use the managed system prompt; the user turn stays a local template.
    ("system", prompt_config.system),
    ("user", "{input}")
])

chain = prompt | llm
response = chain.invoke({"input": "Help me write an email"})

Next Steps