Skip to main content
Glama

Smart Warehouse MCP Agent

claude_interface.py (7.67 kB)
"""
Interface to the Claude API for the warehouse simulation.
"""
import json
import logging
import os
from datetime import datetime
from typing import Any, Dict, List, Optional


class ClaudeInterface:
    """Interface to the Claude API for the warehouse simulation.

    Wraps the Anthropic Messages API, injects the current warehouse state
    into the system prompt, and parses structured ``{"actions": [...]}``
    JSON blocks out of Claude's free-text replies.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        model: Optional[str] = None,
        logger: Optional[logging.Logger] = None
    ):
        """Initialize the Claude client.

        Args:
            api_key: Anthropic API key; falls back to the ANTHROPIC_API_KEY
                environment variable (optionally loaded from claude.env).
            model: Model name; falls back to CLAUDE_MODEL, then a default.
            logger: Logger to use; defaults to this module's logger.

        Raises:
            ValueError: If no API key is provided and none is found in the
                environment.
        """
        # Third-party imports are deferred to construction time so the pure
        # parsing helpers of this module stay importable without the SDK.
        import anthropic
        from dotenv import load_dotenv

        # Load environment variables (a missing claude.env is ignored)
        load_dotenv("claude.env")

        # Set up API key and model
        self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
        if not self.api_key:
            raise ValueError("Anthropic API key not provided and not found in environment variables")

        self.model = model or os.getenv("CLAUDE_MODEL", "claude-3-opus-20240229")

        # Initialize Claude client
        self.client = anthropic.Anthropic(api_key=self.api_key)

        # Set up logger
        self.logger = logger or logging.getLogger(__name__)

    def ask(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        warehouse_state: Optional[Dict[str, Any]] = None,
        max_tokens: int = 1000
    ) -> Dict[str, Any]:
        """
        Ask Claude a question and get a response.

        Args:
            prompt: The user prompt to send to Claude
            system_prompt: Optional system prompt to provide context
            warehouse_state: Optional warehouse state to include in the prompt
            max_tokens: Maximum number of tokens to generate

        Returns:
            Dict containing Claude's response and metadata. On failure the
            dict carries an "error" key instead of "response"/"actions".
        """
        try:
            # Construct the system prompt if not provided
            if system_prompt is None:
                system_prompt = self._get_default_system_prompt()

            # Append the warehouse state as a fenced JSON block so Claude
            # can reason over it and mirror the format in its action output
            if warehouse_state:
                state_json = json.dumps(warehouse_state, indent=2)
                system_prompt += f"\n\nCurrent warehouse state:\n```json\n{state_json}\n```"

            # Lazy %-style args avoid formatting cost when the level is off
            self.logger.info("Sending request to Claude (%s)", self.model)
            self.logger.debug("System prompt: %s", system_prompt)
            self.logger.debug("User prompt: %s", prompt)

            # Send request to Claude
            response = self.client.messages.create(
                model=self.model,
                system=system_prompt,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens
            )

            # Extract the response content
            content = response.content[0].text

            self.logger.info("Received response from Claude (%d chars)", len(content))
            self.logger.debug("Response: %s", content)

            # Parse the response to extract actions if possible
            actions = self._extract_actions(content)

            return {
                "prompt": prompt,
                "response": content,
                "actions": actions,
                "model": self.model,
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            # Boundary handler: callers get a structured error record rather
            # than an exception; the stack trace is preserved in the log.
            self.logger.exception("Error querying Claude: %s", e)
            return {
                "prompt": prompt,
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }

    def _get_default_system_prompt(self) -> str:
        """Get the default system prompt for Claude."""
        return """
You are an AI assistant managing a smart warehouse system. Your role is to analyze the warehouse state and suggest optimal actions for inventory management, AGV routing, and order processing.

When responding, follow these guidelines:
1. Analyze the current warehouse state provided in JSON format
2. Identify any issues that need attention (low inventory, pending orders, etc.)
3. Suggest specific actions to address these issues
4. Format your action suggestions in a structured way that can be parsed by the system

For action suggestions, use the following JSON format within your response:
```json
{
  "actions": [
    {
      "type": "move_agv",
      "agent": "agv",
      "action": "move_agv",
      "agv_id": "AGV001",
      "destination": "storage_a"
    },
    {
      "type": "restock_item",
      "agent": "inventory",
      "action": "add_inventory",
      "product_id": "P001",
      "quantity": 10
    }
  ]
}
```

Available action types:
- move_agv: Move an AGV to a new location
- pick_item: Pick an item from inventory using an AGV
- restock_item: Restock an item in inventory
- process_order: Update the status of an order
- charge_agv: Charge an AGV's battery

Available agents:
- inventory: For inventory-related actions
- agv: For AGV-related actions
- restock: For restocking operations
- warehouse: For direct warehouse actions

Be specific and practical in your suggestions, considering the current state of the warehouse.
"""

    def _extract_actions(self, response: str) -> List[Dict[str, Any]]:
        """
        Extract structured actions from Claude's response.

        Looks for fenced ```json blocks containing an {"actions": [...]}
        object; when none yields actions, falls back to scanning the raw
        text for a complete JSON object.
        """
        actions: List[Dict[str, Any]] = []

        try:
            # Collect the contents of fenced JSON blocks. Fences for other
            # languages (e.g. ```python) are tracked too, so that their
            # closing ``` does not invert the open/closed state and swallow
            # subsequent JSON blocks.
            json_blocks: List[str] = []
            current_block: List[str] = []
            in_fence = False
            fence_is_json = False

            for line in response.split('\n'):
                stripped = line.strip()
                if in_fence:
                    if stripped == '```':
                        # End of the fenced block
                        if fence_is_json:
                            json_blocks.append('\n'.join(current_block))
                        current_block = []
                        in_fence = False
                    else:
                        current_block.append(line)
                elif stripped.startswith('```'):
                    in_fence = True
                    # A bare ``` opener is treated as JSON, matching the
                    # example format shown in the default system prompt.
                    fence_is_json = stripped in ('```json', '```')

            # Parse each JSON block
            for block in json_blocks:
                try:
                    data = json.loads(block)
                except json.JSONDecodeError:
                    self.logger.warning("Failed to parse JSON block: %s", block)
                    continue
                # Guard on dict: a fenced block may hold a bare JSON list
                if isinstance(data, dict) and isinstance(data.get("actions"), list):
                    actions.extend(data["actions"])

            # Fallback: scan the raw text for a complete JSON object. A
            # raw_decode scan handles nested braces, which a flat regex
            # such as r'\{[^{}]*\}' cannot.
            if not actions:
                decoder = json.JSONDecoder()
                idx = response.find('{')
                while idx != -1:
                    try:
                        data, end = decoder.raw_decode(response, idx)
                    except json.JSONDecodeError:
                        # Not valid JSON here; try the next opening brace
                        idx = response.find('{', idx + 1)
                        continue
                    if isinstance(data, dict) and isinstance(data.get("actions"), list):
                        actions.extend(data["actions"])
                    idx = response.find('{', end)

        except Exception as e:
            self.logger.error("Error extracting actions from response: %s", e)

        return actions

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Ayancodes2003/claude-mcp-agent-for-supply-chain'

If you have feedback or need assistance with the MCP directory API, please join our Discord server