Get your API key from the dashboard.
Installation
- Python
- TypeScript
Copy
pip install fallom langchain langchain-openai opentelemetry-instrumentation-openai
Copy
npm install @fallom/trace langchain @langchain/openai
Quick Start
- Python
- TypeScript
Copy
# Initialize Fallom FIRST
import fallom
fallom.init(api_key="your-api-key")
# Now import LangChain
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
# Set session context for tracing
fallom.trace.set_session("langchain-app", session_id)
# Create your LangChain components - LLM calls are automatically traced
llm = ChatOpenAI(model="gpt-4o")
prompt = ChatPromptTemplate.from_messages([
("system", "You are a helpful assistant."),
("user", "{input}")
])
chain = prompt | llm
# All LLM calls in the chain are traced
response = chain.invoke({"input": "What is the capital of France?"})
Copy
import fallom from "@fallom/trace";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
// Initialize Fallom
await fallom.init({ apiKey: "your-api-key" });
// Wrap OpenAI for tracing
const model = fallom.trace.wrapOpenAI(new ChatOpenAI({ model: "gpt-4o" }));
fallom.trace.setSession("langchain-app", sessionId);
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant."],
["user", "{input}"]
]);
const chain = prompt.pipe(model);
// All LLM calls in the chain are traced
const response = await chain.invoke({ input: "What is the capital of France?" });
Model A/B Testing with LangChain
Test different models in your LangChain applications:
- Python
- TypeScript
Copy
import fallom
from fallom import models
fallom.init(api_key="your-api-key")
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
# Get assigned model for this session
model_id = models.get("langchain-app", session_id, fallback="gpt-4o")
fallom.trace.set_session("langchain-app", session_id)
# Use the assigned model
llm = ChatOpenAI(model=model_id)
prompt = ChatPromptTemplate.from_messages([
("system", "You are a helpful assistant."),
("user", "{input}")
])
chain = prompt | llm
response = chain.invoke({"input": "Summarize this document"})
Copy
import fallom, { models } from "@fallom/trace";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
await fallom.init({ apiKey: "your-api-key" });
// Get assigned model for this session
const modelId = await models.get("langchain-app", sessionId, {
fallback: "gpt-4o"
});
fallom.trace.setSession("langchain-app", sessionId);
const model = new ChatOpenAI({ model: modelId });
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant."],
["user", "{input}"]
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({ input: "Summarize this document" });
LangChain Agents with Fallom
Trace your LangChain agents:
- Python
- TypeScript
Copy
import fallom
fallom.init(api_key="your-api-key")
from langchain_openai import ChatOpenAI
from langchain.agents import create_react_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.tools import DuckDuckGoSearchRun
fallom.trace.set_session("langchain-agent", session_id)
llm = ChatOpenAI(model="gpt-4o")
tools = [DuckDuckGoSearchRun()]
# Create and run the agent - all LLM calls are traced
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
response = agent_executor.invoke({"input": "What's the latest news about AI?"})
Copy
import fallom from "@fallom/trace";
import { ChatOpenAI } from "@langchain/openai";
import { createReactAgent, AgentExecutor } from "langchain/agents";
await fallom.init({ apiKey: "your-api-key" });
fallom.trace.setSession("langchain-agent", sessionId);
const model = new ChatOpenAI({ model: "gpt-4o" });
// Create and run the agent - all LLM calls are traced
const agent = await createReactAgent({ llm: model, tools, prompt });
const agentExecutor = new AgentExecutor({ agent, tools });
const response = await agentExecutor.invoke({
input: "What's the latest news about AI?"
});
Prompt Management with LangChain
Use Fallom’s managed prompts with LangChain:
- Python
- TypeScript
Copy
import fallom
from fallom import prompts
fallom.init(api_key="your-api-key")
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
# Get managed prompt
prompt_config = prompts.get("assistant-prompt", variables={
"persona": "helpful assistant"
})
fallom.trace.set_session("langchain-app", session_id)
llm = ChatOpenAI(model="gpt-4o")
prompt = ChatPromptTemplate.from_messages([
("system", prompt_config.system),
("user", "{input}")
])
chain = prompt | llm
response = chain.invoke({"input": "Help me write an email"})
Copy
import fallom, { prompts } from "@fallom/trace";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
await fallom.init({ apiKey: "your-api-key" });
// Get managed prompt
const promptConfig = await prompts.get("assistant-prompt", {
variables: { persona: "helpful assistant" }
});
fallom.trace.setSession("langchain-app", sessionId);
const model = new ChatOpenAI({ model: "gpt-4o" });
const prompt = ChatPromptTemplate.fromMessages([
["system", promptConfig.system],
["user", "{input}"]
]);
const chain = prompt.pipe(model);
const response = await chain.invoke({ input: "Help me write an email" });

