from mem0 import Memory

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Change this according to your local model's dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:latest",
            "temperature": 0,
            "max_tokens": 2000,
            "ollama_base_url": "http://localhost:11434",  # Ensure this URL is correct
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            # Alternatively, you can use "snowflake-arctic-embed:latest"
            "ollama_base_url": "http://localhost:11434",
        },
    },
}
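
# Prerequisites (implied by the config above): a Qdrant instance reachable at
# localhost:6333 and an Ollama server at http://localhost:11434 with the
# configured models pulled (e.g. `ollama pull llama3.1` and
# `ollama pull nomic-embed-text`).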

# Initialize Memory with the configuration
m = Memory.from_config(config)

# Add a memory
m.add("I'm visiting Paris", user_id="john")

# Retrieve memories
memories = m.get_all(user_id="john")
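
# Optional follow-up (a minimal sketch, not part of the original snippet):
# semantic search over the stored memories. This assumes m.search(query, user_id=...)
# is available, as in recent mem0 releases; depending on the version, results may
# come back as a list of dicts or as a dict with a "results" key.
related = m.search("Where is John traveling?", user_id="john")
results = related["results"] if isinstance(related, dict) else related
for entry in results:
    print(entry["memory"])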