from fallom import prompts

# Get prompt from A/B test (sticky assignment based on session_id)
prompt = prompts.get_ab(
    "onboarding-test",
    session_id,
    variables={"user_name": "John"},
)
# prompt.ab_test_key and prompt.variant_index are set
# for analytics in your dashboard
When you call prompts.get() or prompts.get_ab(), the next LLM call is automatically tagged with the prompt information. This allows you to see which prompts are used in your traces without any extra code.
# Get prompt - sets up auto-tagging for next LLM call
prompt = prompts.get("onboarding", variables={"user_name": "John"})

# This call is automatically tagged with prompt_key, prompt_version, etc.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": prompt.system},
        {"role": "user", "content": prompt.user},
    ],
)
import { prompts } from "@fallom/trace";// Get prompt from A/B test (sticky assignment based on sessionId)const prompt = await prompts.getAB("onboarding-test", sessionId, { variables: { userName: "John" },});// prompt.abTestKey and prompt.variantIndex are set// for analytics in your dashboard
When you call prompts.get() or prompts.getAB(), the next LLM call is automatically tagged with the prompt information. This allows you to see which prompts are used in your traces without any extra code.
// Get prompt - sets up auto-tagging for next LLM callconst prompt = await prompts.get("onboarding", { variables: { userName: "John" },});// This call is automatically tagged with promptKey, promptVersion, etc.const response = await openai.chat.completions.create({ model: "gpt-4o", messages: [ { role: "system", content: prompt.system }, { role: "user", content: prompt.user }, ],});