🔐 Mem0 is now SOC 2 and HIPAA compliant! We're committed to the highest standards of data security and privacy, enabling secure memory for enterprises, healthcare, and beyond. Learn more.
Integrate Mem0 with OpenAI Agents SDK, a lightweight framework for building multi-agent workflows. This integration enables agents to access persistent memory across conversations, enhancing context retention and personalization.
The following example demonstrates how to create an OpenAI agent with Mem0 memory integration:
Copy
Ask AI
import os

from agents import Agent, Runner, function_tool
from mem0 import MemoryClient

# Set up environment variables
os.environ["OPENAI_API_KEY"] = "your-openai-api-key"
os.environ["MEM0_API_KEY"] = "your-mem0-api-key"

# Initialize Mem0 client (reads MEM0_API_KEY from the environment)
mem0 = MemoryClient()


# Define memory tools for the agent
@function_tool
def search_memory(query: str, user_id: str) -> str:
    """Search through past conversations and memories.

    Args:
        query: Free-text query to match against stored memories.
        user_id: Unique identifier whose memory store is searched.

    Returns:
        A newline-separated bullet list of the top matches, or a
        fallback message when nothing relevant is stored.
    """
    memories = mem0.search(query, user_id=user_id, limit=3)
    if memories:
        return "\n".join(f"- {mem['memory']}" for mem in memories)
    return "No relevant memories found."


@function_tool
def save_memory(content: str, user_id: str) -> str:
    """Save important information to memory.

    Args:
        content: The fact or preference to persist.
        user_id: Unique identifier whose memory store receives it.

    Returns:
        A confirmation string for the model.
    """
    mem0.add([{"role": "user", "content": content}], user_id=user_id)
    return "Information saved to memory."


# Create agent with memory capabilities
agent = Agent(
    name="Personal Assistant",
    instructions="""You are a helpful personal assistant with memory capabilities.
    Use the search_memory tool to recall past conversations and user preferences.
    Use the save_memory tool to store important information about the user.
    Always personalize your responses based on available memory.""",
    tools=[search_memory, save_memory],
    model="gpt-4o",
)


def chat_with_agent(user_input: str, user_id: str) -> str:
    """
    Handle user input with automatic memory integration.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The agent's response
    """
    # Fix: the original dropped user_id entirely, so the memory tools
    # (which both require a user_id argument) had no way of knowing whose
    # memories to read or write. Prefix the input with the user id so the
    # model can forward it to search_memory/save_memory.
    contextualized_input = f"[user_id: {user_id}] {user_input}"
    # Run the agent (it will automatically use memory tools when needed)
    result = Runner.run_sync(agent, contextualized_input)
    return result.final_output


# Example usage
if __name__ == "__main__":
    # preferences will be saved in memory (using save_memory tool)
    response_1 = chat_with_agent(
        "I love Italian food and I'm planning a trip to Rome next month",
        user_id="alice",
    )
    print(response_1)

    # memory will be retrieved using search_memory tool to answer the user query
    response_2 = chat_with_agent(
        "Give me some recommendations for food",
        user_id="alice",
    )
    print(response_2)
Create multiple specialized agents with proper handoffs and shared memory:
Copy
Ask AI
from agents import Agent, Runner, handoffs, function_tool

# Specialized agents.
# Fix: the original instructions told the model to call tools named
# "get_user_context" and "store_conversation", but the tools actually
# attached are search_memory and save_memory — the model would attempt to
# invoke nonexistent tools. Instructions now name the real tools.
travel_agent = Agent(
    name="Travel Planner",
    instructions="""You are a travel planning specialist.
    Use the search_memory tool to understand the user's travel preferences and
    history before making recommendations. After providing your response, use
    the save_memory tool to save important details.""",
    tools=[search_memory, save_memory],
    model="gpt-4o",
)

health_agent = Agent(
    name="Health Advisor",
    instructions="""You are a health and wellness advisor.
    Use the search_memory tool to understand the user's health goals and dietary
    preferences. After providing advice, use the save_memory tool to save
    relevant information.""",
    tools=[search_memory, save_memory],
    model="gpt-4o",
)

# Triage agent with handoffs
triage_agent = Agent(
    name="Personal Assistant",
    instructions="""You are a helpful personal assistant that routes requests to specialists.
    For travel-related questions (trips, hotels, flights, destinations), hand off to Travel Planner.
    For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
    For general questions, you can handle them directly using available tools.""",
    handoffs=[travel_agent, health_agent],
    model="gpt-4o",
)


def chat_with_handoffs(user_input: str, user_id: str) -> str:
    """
    Handle user input with automatic agent handoffs and memory integration.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The agent's response
    """
    # Fix: pass the user id along with the input so the specialists'
    # memory tools know whose memories to search and update.
    contextualized_input = f"[user_id: {user_id}] {user_input}"
    # Run the triage agent (it will automatically handoff when needed)
    result = Runner.run_sync(triage_agent, contextualized_input)

    # Store the original conversation in memory
    conversation = [
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": result.final_output},
    ]
    mem0.add(conversation, user_id=user_id)

    return result.final_output


# Example usage
response = chat_with_handoffs("Plan a healthy meal for my Italy trip", user_id="alex")
print(response)
def interactive_chat():
    """Run a terminal chat loop backed by memory-aware agent handoffs.

    Prompts for a user id (defaulting to "demo_user"), then forwards each
    line to chat_with_handoffs until the user types 'quit'.
    """
    user_id = input("Enter your user ID: ") or "demo_user"
    print(f"Chat started for user: {user_id}")
    print("Type 'quit' to exit\n")

    # Keep reading turns until the sentinel word arrives.
    while (message := input("You: ")).lower() != 'quit':
        reply = chat_with_handoffs(message, user_id)
        print(f"Assistant: {reply}\n")


if __name__ == "__main__":
    interactive_chat()