"""
Complete Example: End-to-End Integration
Demonstrates the complete Continuo Memory System workflow
"""
import asyncio
import sys
from pathlib import Path
# Add src to the import path
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
from continuo.embedding_service import EmbeddingService
from continuo.hierarchical_compression import HierarchicalCompressor
from continuo.memory_server import MemoryServer
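
# Roles of the pieces wired together below: EmbeddingService turns text into
# vectors, HierarchicalCompressor folds old chunks into summaries, and
# MemoryServer is the storage/retrieval facade the demo exercises.
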
async def main():
"""Complete system demonstration"""
print("=" * 70)
print("š§ CONTINUO MEMORY SYSTEM - COMPLETE DEMONSTRATION")
print("=" * 70)
    # ========================================
    # PHASE 1: Initial Setup
    # ========================================
    print("\n📦 PHASE 1: System Initialization")
    print("-" * 70)

    print("\n[1/3] Initializing Embedding Service (local)...")
    print(" • Using sentence-transformers")
    print(" • Model: all-MiniLM-L6-v2 (384 dimensions)")
    embedding_service = EmbeddingService(provider="local")
    print(" ✓ Embedding service ready!")

    print("\n[2/3] Configuring Hierarchical Compressor...")
    print(" • Working set: 10 documents")
    print(" • Decay: 7 days (168 hours)")
    print(" • Ratio N0→N1: 5x")
    print(" • Ratio N1→N2: 5x")
    compressor = HierarchicalCompressor(
        max_working_set_size=10,
        decay_hours=168,
        compression_ratio_n1=5,
        compression_ratio_n2=5,
    )
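    # The ratio parameters are assumed to mean roughly "5 N0 chunks fold into
    # one N1 micro-summary, and 5 N1 summaries into one N2 meta-summary".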
print(" ā Compressor configured!")
print("\n[3/3] Initializing Memory Server...")
print(" ⢠Database: ChromaDB")
print(" ⢠Path: ./demo_complete_db")
memory = MemoryServer(
embedding_service=embedding_service,
db_path="./demo_complete_db",
collection_name="demo_collection",
hierarchical_compressor=compressor,
)
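    # The server persists embeddings in a local ChromaDB collection; the
    # db_path directory is assumed to be created on first use.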
print(" ā Memory server operational!")
# ========================================
# PHASE 2: Development Session Simulation
# ========================================
print("\n" + "=" * 70)
print("š» PHASE 2: Simulating Development Session")
print("-" * 70)
# Simulates knowledge being stored during development
knowledge_items = [
{
"text": "Implemented authenticate_user() function in src/auth/service.py. Uses JWT tokens with 24h expiration.",
"metadata": {
"file": "src/auth/service.py",
"type": "implementation",
"tags": ["auth", "jwt"],
},
"level": "N0",
},
{
"text": "Critical bug fixed: race condition in shopping cart. Added mutex in src/cart/manager.py line 87.",
"metadata": {
"file": "src/cart/manager.py",
"type": "bug_fix",
"severity": "critical",
},
"level": "N0",
},
{
"text": "Architectural decision: use PostgreSQL for transactional data, Redis for session cache, S3 for uploads.",
"metadata": {"type": "decision", "category": "architecture"},
"level": "N0",
},
{
"text": "Code pattern established: validate ALL inputs with Pydantic models before processing.",
"metadata": {"type": "pattern", "category": "security"},
"level": "N0",
},
{
"text": "Important commit: Migration to async/await in payment service. Performance improved 3x.",
"metadata": {
"file": "src/payment/service.py",
"type": "refactoring",
"impact": "high",
},
"level": "N0",
},
{
"text": "Endpoint /api/v1/checkout implemented. Processes payments via Stripe, returns 201 on success.",
"metadata": {
"file": "src/api/checkout.py",
"type": "api",
"method": "POST",
},
"level": "N0",
},
]
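    # Each item pairs raw text with free-form metadata plus a hierarchy level;
    # "N0" marks an uncompressed chunk, the bottom of the N0→N1→N2 pyramid.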
print("\nš Storing session knowledge...\n")
    for i, item in enumerate(knowledge_items, 1):
        doc_id = memory.store(
            text=item["text"],
            metadata=item["metadata"],
            level=item["level"],
        )
        preview = item["text"][:50] + "..." if len(item["text"]) > 50 else item["text"]
        print(f" [{i}/{len(knowledge_items)}] ✓ {doc_id[:8]}... | {preview}")
    # ========================================
    # PHASE 3: Semantic Searches
    # ========================================
    print("\n" + "=" * 70)
    print("🔍 PHASE 3: Contextual Semantic Searches")
    print("-" * 70)

    queries = [
        ("How does authentication work?", "Searching for auth information..."),
        ("Did we have checkout issues?", "Checking bug history..."),
        ("What is the database architecture?", "Retrieving architectural decisions..."),
        ("How to validate input data?", "Consulting code patterns..."),
    ]
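    # Retrieval is by vector similarity, so a query only needs to be
    # semantically close to a stored chunk, not share exact keywords with it.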
    for query, description in queries:
        print(f"\n🔍 {description}")
        print(f" Query: '{query}'")
        results = memory.retrieve(query, top_k=2)
        if results["documents"]:
            for i, (doc, meta, dist) in enumerate(
                zip(
                    results["documents"],
                    results["metadatas"],
                    results["distances"],
                ),
                1,
            ):
                similarity = (1 - dist) * 100
                print(f"\n [{i}] Similarity: {similarity:.1f}%")
                print(f" {doc[:80]}...")
                # Show relevant metadata
                if "file" in meta:
                    print(f" 📄 File: {meta['file']}")
                if "type" in meta:
                    print(f" 🏷️ Type: {meta['type']}")
        else:
            print(" ⚠️ No results found")
    # ========================================
    # PHASE 4: Hierarchical Compression
    # ========================================
    print("\n" + "=" * 70)
    print("📦 PHASE 4: Demonstrating Hierarchical Compression")
    print("-" * 70)

    print("\n[1/2] Creating micro-summary (N1) from related chunks...")
    # Search for chunks about authentication
    auth_chunks_results = memory.retrieve("authentication security", top_k=3)
    chunks_for_summary = []
    for doc, meta, chunk_id in zip(
        auth_chunks_results["documents"],
        auth_chunks_results["metadatas"],
        auth_chunks_results["ids"],
    ):
        chunks_for_summary.append({"id": chunk_id, "text": doc, "metadata": meta})
    if chunks_for_summary:
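        # llm_summarize_fn would normally call a real LLM; this lambda, which
        # echoes the first 20 words, is just a stand-in for the demo.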
        micro_summary = compressor.create_micro_summary(
            chunks_for_summary,
            llm_summarize_fn=lambda text: f"Summary: {' '.join(text.split()[:20])}...",
        )
        print(" ✓ Micro-summary created:")
        print(f" Source: {len(chunks_for_summary)} chunks")
        print(f" Text: {micro_summary['text'][:100]}...")
        # Store the summary
        summary_id = memory.store(micro_summary["text"], micro_summary["metadata"], level="N1")
        print(f" ID: {summary_id[:8]}...")
    # ========================================
    # PHASE 5: Final Statistics
    # ========================================
    print("\n" + "=" * 70)
    print("📊 PHASE 5: System Statistics")
    print("-" * 70)

    stats = memory.get_stats()
    print(f"\n Total documents: {stats['total_documents']}")
    print("\n By Hierarchical Level:")
    print(f" • N0 (Raw chunks): {stats['by_level']['N0_chunks']}")
    print(f" • N1 (Micro-summaries): {stats['by_level']['N1_micro_summaries']}")
    print(f" • N2 (Meta-summaries): {stats['by_level']['N2_meta_summaries']}")
    print("\n Storage:")
    print(f" • Path: {stats['db_path']}")
    print(f" • Collection: {stats['collection']}")
    # ========================================
    # PHASE 6: Working Set Demonstration
    # ========================================
    print("\n" + "=" * 70)
    print("🎯 PHASE 6: Optimized Working Set")
    print("-" * 70)

    print("\n Selecting working set for query: 'payment implementation'")
    # Search all items
    all_results = memory.retrieve("payment implementation", top_k=10)
    print(f"\n Raw results: {len(all_results['documents'])}")
    print(f" Working set (after compression): max {compressor.max_working_set_size}")
print("\n Top 3 items in working set:")
for i, (doc, dist) in enumerate(
zip(all_results["documents"][:3], all_results["distances"][:3]), 1
):
similarity = (1 - dist) * 100
print(f" [{i}] {similarity:.1f}% | {doc[:60]}...")
    # ========================================
    # Conclusion
    # ========================================
    print("\n" + "=" * 70)
    print("✅ COMPLETE DEMONSTRATION FINISHED!")
    print("=" * 70)
    print("\n🚀 Next Steps:")
    print(" 1. Integrate with Cursor via .cursor/mcp.json")
    print(" 2. Use @continuo-memory in chat to search context")
    print(" 3. Store important decisions with store_memory")
    print(" 4. Explore Auto Mode for supervised automation")
    print("\n📚 Complete Documentation:")
    print(" • CONTINUO_MEMORY.md")
    print(" • examples/memory/README.md")
    print(" • continuo.markdown")
    print("\n" + "=" * 70)
if __name__ == "__main__":
    asyncio.run(main())