Skip to main content
Glama

Continuo Memory System

by GtOkAi
basic_usage.py•3.33 kB
""" Example script: Basic usage of Memory Server Demonstrates storage and semantic search """ import asyncio from continuo.embedding_service import EmbeddingService from continuo.hierarchical_compression import HierarchicalCompressor from continuo.memory_server import MemoryServer async def main(): """Memory Server usage example""" print("=" * 60) print("🧠 Continuo - Memory Server - Basic Example") print("=" * 60) # 1. Initialize components print("\n[1/4] Initializing embedding service (local)...") embedding_service = EmbeddingService(provider="local") print("[2/4] Initializing hierarchical compressor...") compressor = HierarchicalCompressor(max_working_set_size=5, decay_hours=168) print("[3/4] Initializing memory server...") memory = MemoryServer( embedding_service=embedding_service, db_path="./example_chroma_db", hierarchical_compressor=compressor, ) # 2. Store some examples print("\n[4/4] Storing knowledge...") examples = [ { "text": "The process_payment() function is in src/payment/processor.py and uses Stripe API", "metadata": {"file": "src/payment/processor.py", "type": "code"}, }, { "text": "Bug fixed: race condition in checkout. Solution: added mutex at line 145", "metadata": {"file": "src/checkout/cart.py", "type": "bug_fix"}, }, { "text": "Architectural decision: use PostgreSQL for transactions, Redis for session cache", "metadata": {"type": "decision"}, }, { "text": "Code pattern: always validate user input with Pydantic models", "metadata": {"type": "pattern"}, }, ] for ex in examples: doc_id = memory.store(ex["text"], ex["metadata"]) print(f" āœ“ Stored: {doc_id[:8]}...") # 3. 
Semantic search print("\n" + "=" * 60) print("šŸ” Testing semantic search") print("=" * 60) queries = [ "How to process payments?", "Checkout problems", "Which database to use?", ] for query in queries: print(f"\nšŸ“ Query: '{query}'") results = memory.retrieve(query, top_k=2) if results["documents"]: for i, (doc, meta, dist) in enumerate( zip( results["documents"], results["metadatas"], results["distances"], ), 1, ): similarity = 1 - dist print(f"\n [{i}] Similarity: {similarity:.2f}") print(f" {doc[:80]}...") print(f" Meta: {meta}") else: print(" No results found") # 4. Statistics print("\n" + "=" * 60) print("šŸ“Š Memory Statistics") print("=" * 60) stats = memory.get_stats() print(f" Total documents: {stats['total_documents']}") print(f" Chunks (N0): {stats['by_level']['N0_chunks']}") print(f" Micro-summaries (N1): {stats['by_level']['N1_micro_summaries']}") print(f" Meta-summaries (N2): {stats['by_level']['N2_meta_summaries']}") print(f" DB Path: {stats['db_path']}") print("\nāœ… Example completed!") if __name__ == "__main__": asyncio.run(main())

MCP directory API

We provide all of the information about MCP servers through our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/GtOkAi/continuo-memory-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.