Skip to main content
Glama
itshare4u

Agent Knowledge MCP

list_indices

Retrieve all Elasticsearch indices with document counts and size statistics to manage and monitor data storage in knowledge management systems.

Instructions

List all available Elasticsearch indices with document count and size statistics

Input Schema

Table | JSON Schema
Name | Required | Description | Default

No arguments

Implementation Reference

  • Core handler for list_indices tool: Lists all ES indices with doc counts, sizes, metadata from index_metadata index, governance status (documented vs undocumented), rich formatted output.
    async def list_indices() -> str:
        """List all available Elasticsearch indices with basic statistics.

        Skips system indices (names starting with '.'). For each remaining
        index, collects document count and store size, then enriches it with
        governance metadata from the ``index_metadata`` index when available.

        Returns:
            A human-readable, emoji-formatted report string. On failure,
            returns an error report string instead of raising.
        """
        try:
            es = get_es_client()

            indices = es.indices.get_alias(index="*")

            # Gather stats + metadata for each non-system index.
            indices_info = []
            for index_name in indices.keys():
                if index_name.startswith('.'):  # Skip system indices
                    continue
                try:
                    stats = es.indices.stats(index=index_name)
                    doc_count = stats['indices'][index_name]['total']['docs']['count']
                    size = stats['indices'][index_name]['total']['store']['size_in_bytes']

                    # Baseline info; metadata fields are overwritten below if found.
                    index_info = {
                        "name": index_name,
                        "docs": doc_count,
                        "size_bytes": size,
                        "description": "No description available",
                        "purpose": "Not documented",
                        "data_types": [],
                        "usage_pattern": "Unknown",
                        "created_date": "Unknown"
                    }

                    # Look up governance metadata for this index (best effort).
                    try:
                        metadata_search = {
                            "query": {
                                "term": {
                                    "index_name": index_name
                                }
                            },
                            "size": 1
                        }

                        metadata_result = es.search(index="index_metadata", body=metadata_search)

                        if metadata_result['hits']['total']['value'] > 0:
                            metadata = metadata_result['hits']['hits'][0]['_source']
                            # Merge metadata into index info
                            index_info.update({
                                "description": metadata.get('description', 'No description available'),
                                "purpose": metadata.get('purpose', 'Not documented'),
                                "data_types": metadata.get('data_types', []),
                                "usage_pattern": metadata.get('usage_pattern', 'Unknown'),
                                "created_date": metadata.get('created_date', 'Unknown'),
                                "retention_policy": metadata.get('retention_policy', 'Not specified'),
                                "related_indices": metadata.get('related_indices', []),
                                "tags": metadata.get('tags', []),
                                "created_by": metadata.get('created_by', 'Unknown'),
                                "has_metadata": True
                            })
                        else:
                            index_info["has_metadata"] = False

                    except Exception:
                        # Metadata index may not exist yet; keep basic info.
                        index_info["has_metadata"] = False

                    indices_info.append(index_info)

                except Exception:
                    # Was a bare `except:` — narrowed so KeyboardInterrupt/
                    # SystemExit are not swallowed. Stats lookup failed; record
                    # the index with placeholder values.
                    indices_info.append({
                        "name": index_name,
                        "docs": "unknown",
                        "size_bytes": "unknown",
                        "description": "Statistics unavailable",
                        "has_metadata": False
                    })

            # Sort indices: metadata-documented first, then by name
            indices_info.sort(key=lambda x: (not x.get('has_metadata', False), x['name']))

            # Format the output with metadata information
            result = "βœ… Available indices with metadata:\n\n"

            # Count documented vs undocumented
            documented = sum(1 for idx in indices_info if idx.get('has_metadata', False))
            undocumented = len(indices_info) - documented

            result += "πŸ“Š **Index Overview**:\n"
            result += f"   πŸ“‹ Total indices: {len(indices_info)}\n"
            result += f"   βœ… Documented: {documented}\n"
            result += f"   ❌ Undocumented: {undocumented}\n\n"

            if undocumented > 0:
                result += f"🚨 **Governance Alert**: {undocumented} indices lack metadata documentation\n"
                result += "   πŸ’‘ Use 'create_index_metadata' tool to document missing indices\n"
                result += "   🎯 Proper documentation improves index management and team collaboration\n\n"

            # Group indices by documentation status
            documented_indices = [idx for idx in indices_info if idx.get('has_metadata', False)]
            undocumented_indices = [idx for idx in indices_info if not idx.get('has_metadata', False)]

            if documented_indices:
                result += f"πŸ“‹ **Documented Indices** ({len(documented_indices)}):\n\n"
                for idx in documented_indices:
                    # size_bytes may be the string "unknown" when stats failed.
                    size_mb = idx['size_bytes'] / 1048576 if isinstance(idx['size_bytes'], (int, float)) else 0
                    result += f"🟒 **{idx['name']}**\n"
                    result += f"   πŸ“ Description: {idx['description']}\n"
                    result += f"   🎯 Purpose: {idx['purpose']}\n"
                    result += f"   πŸ“Š Documents: {idx['docs']}, Size: {size_mb:.1f} MB\n"
                    result += f"   πŸ“‚ Data Types: {', '.join(idx.get('data_types', [])) or 'Not specified'}\n"
                    result += f"   πŸ”„ Usage: {idx.get('usage_pattern', 'Unknown')}\n"
                    result += f"   πŸ“… Created: {idx.get('created_date', 'Unknown')}\n"
                    if idx.get('tags'):
                        result += f"   🏷️ Tags: {', '.join(idx['tags'])}\n"
                    if idx.get('related_indices'):
                        result += f"   πŸ”— Related: {', '.join(idx['related_indices'])}\n"
                    result += "\n"

            if undocumented_indices:
                result += f"❌ **Undocumented Indices** ({len(undocumented_indices)}) - Need Metadata:\n\n"
                for idx in undocumented_indices:
                    size_mb = idx['size_bytes'] / 1048576 if isinstance(idx['size_bytes'], (int, float)) else 0
                    result += f"πŸ”΄ **{idx['name']}**\n"
                    result += f"   πŸ“Š Documents: {idx['docs']}, Size: {size_mb:.1f} MB\n"
                    result += "   ⚠️ Status: No metadata documentation found\n"
                    result += "   πŸ”§ Action: Use 'create_index_metadata' to document this index\n\n"

            # Add metadata improvement suggestions
            if undocumented > 0:
                result += "πŸ’‘ **Metadata Improvement Suggestions**:\n"
                result += "   πŸ“‹ Document each index's purpose and data types\n"
                result += "   🎯 Define usage patterns and access frequencies\n"
                result += "   πŸ“… Record creation dates and retention policies\n"
                result += "   πŸ”— Link related indices for better organization\n"
                result += "   🏷️ Add relevant tags for categorization\n"
                result += "   πŸ‘€ Track ownership and responsibility\n\n"

            return result

        except Exception as e:
            # Provide detailed error messages for different types of Elasticsearch errors
            error_message = "❌ Failed to list indices:\n\n"

            error_str = str(e).lower()
            if "connection" in error_str or "refused" in error_str:
                error_message += "πŸ”Œ **Connection Error**: Cannot connect to Elasticsearch server\n"
                error_message += "πŸ“ Check if Elasticsearch is running at the configured address\n"
                error_message += "πŸ’‘ Try: Use 'setup_elasticsearch' tool to start Elasticsearch\n\n"
            elif "timeout" in error_str:
                error_message += "⏱️ **Timeout Error**: Elasticsearch server is not responding\n"
                error_message += "πŸ“ Server may be overloaded or slow to respond\n"
                error_message += "πŸ’‘ Try: Wait and retry, or check server status\n\n"
            else:
                error_message += f"⚠️ **Unknown Error**: {str(e)}\n\n"

            error_message += f"πŸ” **Technical Details**: {str(e)}"

            return error_message
  • Tool schema definition via FastMCP decorator: provides description, tags; no input parameters (returns str).
    # FastMCP tool registration decorator for the list_indices handler:
    # supplies the client-facing description and discovery tags. The tool
    # declares no input parameters and returns a formatted string.
    @app.tool(
        description="List all available Elasticsearch indices with document count and size statistics",
        tags={"elasticsearch", "list", "indices", "stats"}
    )
  • Mounts the elasticsearch_index sub-server app (containing list_indices, create_index, delete_index) into the unified elasticsearch_server FastMCP app.
    app.mount(index_app)               # 3 tools: index management
  • Imports the elasticsearch_index sub-server app for mounting.
    from .sub_servers.elasticsearch_index import app as index_app

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/itshare4u/AgentKnowledgeMCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server