Skip to main content
Glama
context.py2.27 kB
"""Context monitoring and token tracking.""" import tiktoken class ContextMonitor: """Tracks token usage and triggers compaction warnings.""" def __init__(self, max_tokens: int = 128_000, model: str = "cl100k_base"): """Initialize monitor. Args: max_tokens: Maximum context window size (default 128k for GPT-4) model: Tiktoken encoding model """ self.max_tokens = max_tokens self._encoder = tiktoken.get_encoding(model) self._current_tokens = 0 self._message_history: list[int] = [] def count_tokens(self, text: str) -> int: """Count tokens in text.""" return len(self._encoder.encode(text)) def add_message(self, content: str) -> int: """Track message tokens. Returns token count for this message.""" tokens = self.count_tokens(content) self._current_tokens += tokens self._message_history.append(tokens) return tokens def get_utilization(self) -> float: """Returns utilization percentage (0-100).""" return (self._current_tokens / self.max_tokens) * 100 def should_compact(self) -> bool: """Returns True if utilization >60%.""" return self.get_utilization() > 60 def should_warn(self) -> bool: """Returns True if utilization >70%.""" return self.get_utilization() > 70 def reset(self, new_token_count: int = 0) -> None: """Reset token count after compaction.""" self._current_tokens = new_token_count self._message_history = [new_token_count] if new_token_count else [] @property def current_tokens(self) -> int: """Current token count.""" return self._current_tokens @property def remaining_tokens(self) -> int: """Tokens remaining before max.""" return max(0, self.max_tokens - self._current_tokens) def get_status(self) -> dict: """Get current status as dict.""" return { "current_tokens": self._current_tokens, "max_tokens": self.max_tokens, "utilization_percent": round(self.get_utilization(), 2), "should_compact": self.should_compact(), "should_warn": self.should_warn(), }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/jamesctucker/pathfinder-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.