Skip to main content
Glama
memory.py (4.72 kB)
"""In-memory cache implementation."""

import asyncio
import time
from typing import Any, Dict, List, Optional

from .base import BaseCache, CacheEntry


class MemoryCache(BaseCache):
    """In-memory cache with LRU eviction.

    Entries live in a plain dict; recency is tracked in a separate list
    (``_access_order``) whose head is the least recently used key. All
    coroutine methods that read or mutate state serialize on ``_lock``.
    Expired entries are dropped lazily on access and by ``keys()``.
    """

    def __init__(
        self,
        name: str = "memory",
        max_size: int = 1000,
        default_ttl: Optional[float] = None,
    ) -> None:
        """Initialize memory cache.

        Args:
            name: Cache name.
            max_size: Maximum number of entries before LRU eviction.
            default_ttl: Default TTL in seconds (``None`` = no expiry).
        """
        super().__init__(name, max_size)
        self.default_ttl = default_ttl
        self._cache: Dict[str, CacheEntry] = {}
        self._access_order: List[str] = []  # head = least recently used
        self._lock = asyncio.Lock()

    def _remove_key(self, key: str) -> None:
        """Remove *key* from the store and the LRU order.

        Caller must hold ``self._lock``; *key* must be in ``_cache``.
        """
        del self._cache[key]
        if key in self._access_order:
            self._access_order.remove(key)

    def _touch(self, key: str) -> None:
        """Move *key* to the most-recently-used end of the order list.

        Caller must hold ``self._lock``.
        """
        if key in self._access_order:
            self._access_order.remove(key)
        self._access_order.append(key)

    async def get(self, key: str) -> Optional[Any]:
        """Get value from cache, or ``None`` on miss or expiry."""
        if not self.enabled:
            return None

        async with self._lock:
            entry = self._cache.get(key)
            if entry is None:
                self._record_miss()
                return None

            if entry.is_expired():
                # Lazy expiry: drop the stale entry and report a miss.
                self._remove_key(key)
                self._record_miss()
                return None

            # Update access statistics and LRU order.
            entry.update_access()
            self._touch(key)
            self._record_hit()
            return entry.value

    async def set(self, key: str, value: Any, ttl: Optional[float] = None) -> None:
        """Set value in cache, evicting the LRU entry if at capacity.

        Args:
            key: Cache key.
            value: Value to store.
            ttl: Entry TTL in seconds; falls back to ``default_ttl``.
        """
        if not self.enabled:
            return

        async with self._lock:
            effective_ttl = ttl if ttl is not None else self.default_ttl

            now = time.time()
            entry = CacheEntry(
                key=key,
                value=value,
                created_at=now,
                accessed_at=now,
                ttl=effective_ttl,
            )

            # Only a brand-new key can push us past max_size; an update
            # replaces in place and needs no eviction.
            if key not in self._cache and len(self._cache) >= self.max_size:
                await self._evict_lru()

            self._cache[key] = entry
            self._touch(key)
            self._record_set()

    async def delete(self, key: str) -> bool:
        """Delete value from cache.

        Returns:
            True if the key was present and removed, False otherwise.
        """
        async with self._lock:
            if key not in self._cache:
                return False
            self._remove_key(key)
            self._record_delete()
            return True

    async def clear(self) -> None:
        """Clear all cache entries."""
        async with self._lock:
            self._cache.clear()
            self._access_order.clear()
            self._logger.info(f"Cleared cache: {self.name}")

    async def size(self) -> int:
        """Get current cache size.

        Acquires the lock for consistency with the other accessors.
        """
        async with self._lock:
            return len(self._cache)

    async def keys(self) -> List[str]:
        """Get all cache keys, after purging expired entries."""
        async with self._lock:
            # Clean expired entries first so callers never see stale keys.
            await self._clean_expired()
            return list(self._cache.keys())

    async def _evict_lru(self) -> None:
        """Evict the least recently used entry (lock must be held)."""
        if self._access_order:
            # pop(0) removes the LRU key in one pass instead of
            # indexing [0] and then O(n)-removing the same element.
            lru_key = self._access_order.pop(0)
            del self._cache[lru_key]
            self._record_eviction()
            self._logger.debug(f"Evicted LRU entry: {lru_key}")

    async def _clean_expired(self) -> None:
        """Drop every expired entry (lock must be held)."""
        expired_keys = [key for key, entry in self._cache.items() if entry.is_expired()]
        for key in expired_keys:
            self._remove_key(key)
        if expired_keys:
            self._logger.debug(f"Cleaned {len(expired_keys)} expired entries")

    def get_detailed_stats(self) -> Dict[str, Any]:
        """Get detailed cache statistics.

        Extends the base stats with memory-backend specifics. Runs
        without the lock (it is synchronous); values are a snapshot.
        """
        stats = self.get_stats()
        stats.update(
            {
                "current_size": len(self._cache),
                "access_order_length": len(self._access_order),
                "default_ttl": self.default_ttl,
            }
        )
        return stats

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/namnd00/mcp-server-hero'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.