Skip to main content
Glama

Continuo Memory System

by GtOkAi
auto_mode_demo.py (3.9 kB)
"""Example script: Autonomous Mode.

Demonstrates the Observe → Plan → Act → Reflect cycle using mock
components in place of real build events, an LLM, and tools.
"""
import asyncio
from typing import Any, Optional

from continuo.auto_mode import AutoMode, AutoModeConfig
from continuo.embedding_service import EmbeddingService
from continuo.memory_server import MemoryServer


# Simulators for demonstration
class MockEventSource:
    """Simulates build/test events"""

    def __init__(self):
        # Canned events, consumed one per call to get_event().
        self.events = [
            {
                "id": "evt1",
                "type": "build_error",
                "description": "ImportError in src/main.py line 10",
                "file": "src/main.py",
            },
            {
                "id": "evt2",
                "type": "test_failure",
                "description": "test_payment failed: AssertionError",
                "file": "tests/test_payment.py",
            },
        ]
        self.index = 0

    def get_event(self) -> Optional[dict[str, Any]]:
        """Returns next event"""
        # None signals exhaustion, which ends the autonomous cycle.
        if self.index >= len(self.events):
            return None
        event = self.events[self.index]
        self.index += 1
        return event


class MockLLMPlanner:
    """Simulates planner generating plans"""

    def plan(self, prompt: str) -> dict[str, Any]:
        """Generates mock plan"""
        # In practice, you would call OpenAI/Claude here
        return {
            "tasks": [
                {
                    "action": "fix_import",
                    "params": {"file": "src/main.py", "line": 10},
                    "description": "Fix missing import",
                },
                {
                    "action": "run_tests",
                    "params": {"test_file": "tests/test_payment.py"},
                    "description": "Re-run tests",
                },
            ],
            "reasoning": "Import error must be fixed before tests",
        }


class MockToolExecutor:
    """Simulates tool execution"""

    def execute(self, action: str, params: dict[str, Any]) -> str:
        """Executes mock action"""
        print(f" 🔧 Executing: {action} with {params}")
        return f"✓ {action} completed"


async def main():
    """Demonstrates autonomous mode"""
    print("=" * 60)
    print("🤖 Continuo - Autonomous Mode - Demo")
    print("=" * 60)

    # 1. Setup
    print("\n[1/3] Initializing components...")
    embedding_service = EmbeddingService(provider="local")
    memory = MemoryServer(
        embedding_service=embedding_service,
        db_path="./auto_mode_chroma_db",
    )
    config = AutoModeConfig(
        max_auto_edits=5,
        max_iterations=2,
        require_confirmation=False,  # Demo without confirmation
        rate_limit_seconds=1,
    )
    auto_mode = AutoMode(memory_server=memory, config=config)

    # 2. Setup simulators
    print("[2/3] Configuring simulators...")
    event_source = MockEventSource()
    llm_planner = MockLLMPlanner()
    tool_executor = MockToolExecutor()

    # 3. Run cycle
    print("[3/3] Starting autonomous cycle...\n")
    print("=" * 60)
    await auto_mode.run_cycle(
        event_source=event_source.get_event,
        llm_planner=llm_planner.plan,
        tool_executor=tool_executor.execute,
    )

    # 4. Results
    print("\n" + "=" * 60)
    print("📊 Results")
    print("=" * 60)
    print(f" Iterations executed: {auto_mode.iteration_count}")
    print(f" Edits performed: {auto_mode.edit_count}/{config.max_auto_edits}")

    # 5. Memory
    print("\n" + "=" * 60)
    print("🧠 Stored Knowledge")
    print("=" * 60)

    # Search action logs
    # NOTE(review): assumes retrieve() returns a mapping with a
    # "documents" list — confirm against MemoryServer's API.
    results = memory.retrieve("executed actions", top_k=3)
    for i, doc in enumerate(results["documents"], 1):
        print(f"\n [{i}] {doc[:100]}...")

    print("\n✅ Demo completed!")
    print("📝 Detailed logs in: auto_mode.log")


if __name__ == "__main__":
    asyncio.run(main())

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/GtOkAi/continuo-memory-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.