Getting a prompt is the first step toward using it in your code. You can fetch a prompt by its name.
```python
import os

from literalai import LiteralClient

client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))

# This will fetch the champion version, you can also pass a specific version
prompt = await client.api.get_prompt(name="Default")

# Synchronous version
prompt = client.api.get_prompt_sync(name="Default")
```
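If you need a particular version rather than the champion, the same call accepts a version argument. A minimal sketch, assuming the parameter is named `version` (the exact name is an assumption, not confirmed by the snippet above):

```python
# Assumed parameter name: fetch version 0 of the "Default" prompt instead of the champion
prompt = await client.api.get_prompt(name="Default", version=0)
```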
Once you have your prompt, you can format it to get messages in the OpenAI format.
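A minimal sketch, assuming the prompt's template references a `question` variable (the variable name is hypothetical and depends on your template):

```python
# "question" is a hypothetical template variable; use the variables your own prompt defines
messages = prompt.format({"question": "What is the capital of France?"})

# `messages` is a list of chat messages in the OpenAI format, e.g.
# [{"role": "system", "content": "..."},
#  {"role": "user", "content": "What is the capital of France?"}]
```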
Combining prompts with integrations (such as the OpenAI integration) lets you log generations and track which prompt version was used to produce them.
```python
import os
import asyncio

from literalai import LiteralClient
from openai import AsyncOpenAI

openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))

# Optionally instrument the openai client to log generations
literal_client.instrument_openai()


async def main():
    prompt = await literal_client.api.get_prompt(name="Default")

    # Optionally pass variables to the prompt
    variables = {"foo": "bar"}
    messages = prompt.format(variables)

    stream = await openai_client.chat.completions.create(
        messages=messages,
        # Optionally pass the tools defined in the prompt
        tools=prompt.tools,
        # Pass the settings defined in the prompt
        **prompt.settings,
        stream=True,
    )

    async for chunk in stream:
        print(chunk)


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```