The full source code for this guide can be found in our cookbooks repository.

This code integrates an asynchronous OpenAI client with a Literal AI client to create a conversational agent. It uses Literal AI's step decorators for structured logging and tool orchestration within the conversational flow. The agent processes user messages, decides whether to call tools from a predefined set, and generates responses, with a maximum iteration limit to guard against infinite tool-calling loops.

.env
LITERAL_API_KEY=
OPENAI_API_KEY=
import asyncio
import json

from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion, ChatCompletionMessage
from literalai import LiteralClient

from dotenv import load_dotenv
load_dotenv()

client = AsyncOpenAI()
lc = LiteralClient()
lc.instrument_openai()

MAX_ITER = 5

# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
@lc.step(type="tool")
def get_current_weather(location, unit=None):
    """Get the current weather in a given location"""
    unit = unit or "fahrenheit"
    weather_info = {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }

    return json.dumps(weather_info)
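
Called directly, the tool returns a JSON string, which is exactly what gets handed back to the model as the tool result. For reference, the output looks like this (values come straight from the hard-coded dictionary above):

get_current_weather("San Francisco, CA")
# '{"location": "San Francisco, CA", "temperature": "72", "unit": "fahrenheit", "forecast": ["sunny", "windy"]}'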


tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]
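
When the model opts to use the tool, the assistant message comes back with `tool_calls` populated instead of text content. The run loop below keys off this structure; rendered as a dict, it looks roughly like this (the id and argument values are illustrative, not real output):

{
    "role": "assistant",
    "content": None,
    "tool_calls": [
        {
            "id": "call_abc123",  # illustrative id
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "arguments": '{"location": "San Francisco, CA", "unit": "fahrenheit"}',
            },
        }
    ],
}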


@lc.step(type="run")
async def run(message_history):

    tool_called = True
    cur_iter = 0
    while tool_called and cur_iter < MAX_ITER:
        settings = {
            "model": "gpt-4",
            "tools": tools,
            "tool_choice": "auto",
        }
        response: ChatCompletion = await client.chat.completions.create(
            messages=message_history, **settings
        )

        message: ChatCompletionMessage = response.choices[0].message

        message_history.append(message)
        if not message.tool_calls:
            tool_called = False

        for tool_call in message.tool_calls or []:
            if tool_call.type == "function":
                # Resolve the tool implementation by name, then parse the
                # model-supplied arguments (a JSON string) before calling it.
                func = globals()[tool_call.function.name]
                arguments = json.loads(tool_call.function.arguments)
                res = func(**arguments)
                message_history.append({
                    "role": "tool",
                    "name": tool_call.function.name,
                    "content": res,
                    "tool_call_id": tool_call.id,
                })

        cur_iter += 1
    
    return message_history
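
Resolving tools via `globals()` keeps the example compact, but it will invoke any top-level name the model emits. In production, an explicit allow-list is safer; here is a minimal sketch (the `TOOL_REGISTRY` and `dispatch_tool` names are ours, not part of Literal AI or OpenAI):

# Hypothetical registry-based dispatch; only registered tools can be called.
TOOL_REGISTRY = {
    "get_current_weather": get_current_weather,
}

def dispatch_tool(name: str, arguments: str) -> str:
    """Parse the model-supplied JSON arguments and call a registered tool."""
    if name not in TOOL_REGISTRY:
        raise ValueError(f"Unknown tool requested: {name}")
    return TOOL_REGISTRY[name](**json.loads(arguments))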



if __name__ == "__main__":
    message_history = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "what's the weather in sf"},
    ]

    message_history = asyncio.run(run(message_history))
    lc.flush_and_stop()
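
To see the final answer in your terminal, you can print the last history entry at the end of the main block. Assuming the loop ended with a plain assistant reply (rather than hitting MAX_ITER mid-tool-call), that entry is a ChatCompletionMessage, so you read its .content attribute:

    # The loop exits once the model answers without requesting a tool,
    # so the last history entry is the final assistant message.
    print(message_history[-1].content)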

With the Literal AI integration in place, you can visualize runs and LLM calls directly on the Literal AI platform, making your AI-driven applications easier to inspect and debug.