Glama

Claude Context

by zilliztech
test_endtoend.py • 4.92 kB
#!/usr/bin/env python3
"""
Claude Context End-to-End Test
Use TypeScriptExecutor to call complete Claude Context workflow
"""

import os
import sys
from pathlib import Path

# Add python directory to path
sys.path.append(str(Path(__file__).parent))

from ts_executor import TypeScriptExecutor


def run_context_endtoend_test():
    """Run Claude Context end-to-end test"""

    # Configuration parameters
    config = {
        "openaiApiKey": os.environ.get("OPENAI_API_KEY", "your-openai-api-key"),
        "milvusAddress": os.environ.get("MILVUS_ADDRESS", "localhost:19530"),
        "codebasePath": str(
            Path(__file__).parent.parent / "packages" / "core" / "src"
        ),  # Index core source code
        "searchQuery": "embedding creation and vector database configuration",
    }

    print("🚀 Starting Claude Context end-to-end test")
    print(f"📊 Configuration:")
    print(f"   - Codebase path: {config['codebasePath']}")
    print(f"   - Vector database: {config['milvusAddress']}")
    print(f"   - Search query: {config['searchQuery']}")
    print(
        f"   - OpenAI API: {'✅ Configured' if config['openaiApiKey'] != 'your-openai-api-key' else '❌ Need to set OPENAI_API_KEY environment variable'}"
    )
    print()

    try:
        executor = TypeScriptExecutor()

        # Call end-to-end test
        result = executor.call_method(
            "./test_context.ts", "testContextEndToEnd", config
        )

        # Output results
        if result.get("success"):
            print("✅ End-to-end test successful!")
            print(f"📅 Timestamp: {result.get('timestamp')}")

            # Display configuration info
            config_info = result.get("config", {})
            print(f"🔧 Configuration:")
            print(f"   - Embedding provider: {config_info.get('embeddingProvider')}")
            print(f"   - Embedding model: {config_info.get('embeddingModel')}")
            print(f"   - Embedding dimension: {config_info.get('embeddingDimension')}")
            print(f"   - Vector database: {config_info.get('vectorDatabase')}")
            print(f"   - Chunk size: {config_info.get('chunkSize')}")
            print(f"   - Chunk overlap: {config_info.get('chunkOverlap')}")

            # Display indexing statistics
            index_stats = result.get("indexStats", {})
            print(f"📚 Indexing statistics:")
            print(f"   - Indexed files: {index_stats.get('indexedFiles', 0)}")
            print(f"   - Total chunks: {index_stats.get('totalChunks', 0)}")

            # Display search results
            summary = result.get("summary", {})
            search_results = result.get("searchResults", [])
            print(f"🔍 Search results:")
            print(f"   - Query: '{result.get('searchQuery')}'")
            print(f"   - Results found: {summary.get('foundResults', 0)} items")
            print(f"   - Average relevance: {summary.get('avgScore', 0):.3f}")

            # Display top 3 search results
            if search_results:
                print(f"📋 Top {min(3, len(search_results))} most relevant results:")
                for i, item in enumerate(search_results[:3]):
                    print(
                        f"   {i+1}. {item['relativePath']} (lines {item['startLine']}-{item['endLine']})"
                    )
                    print(
                        f"      Language: {item['language']}, Relevance: {item['score']:.3f}"
                    )
                    print(f"      Preview: {item['contentPreview'][:100]}...")
                    print()

            return True
        else:
            print("❌ End-to-end test failed")
            print(f"Error: {result.get('error')}")
            if result.get("stack"):
                print(f"Stack trace: {result.get('stack')}")
            return False

    except Exception as e:
        print(f"❌ Execution failed: {e}")
        return False


def main():
    """Main function"""
    print("=" * 60)
    print("🧪 Claude Context End-to-End Test")
    print("=" * 60)
    print()

    success = run_context_endtoend_test()

    print()
    print("=" * 60)
    if success:
        print("🎉 Test completed! Claude Context end-to-end workflow runs successfully!")
        print()
        print("💡 This proves:")
        print("   ✅ Can call TypeScript Claude Context from Python")
        print("   ✅ Supports complete indexing and search workflow")
        print("   ✅ Supports complex configuration and parameter passing")
        print("   ✅ Can get detailed execution results and statistics")
    else:
        print("❌ Test failed. Please check:")
        print("   - OPENAI_API_KEY environment variable is correctly set")
        print("   - Milvus vector database is running properly")
        print("   - packages/core code is accessible")
    print("=" * 60)


if __name__ == "__main__":
    main()
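
The test imports TypeScriptExecutor from a local ts_executor module that is not shown on this page. As a rough illustration only, the sketch below shows one way such a Python-to-TypeScript bridge could work, assuming it shells out to a Node-based TypeScript runner (npx tsx is an assumption here) with a temporary wrapper script and exchanges the configuration and result as JSON; the repository's actual ts_executor.py may be implemented differently.

import json
import subprocess
import tempfile
from pathlib import Path


class TypeScriptExecutor:
    """Hypothetical sketch of the Python-to-TypeScript bridge used above.

    Assumes the target module exports the named async function and that
    `npx tsx` is available on PATH; the real ts_executor may differ.
    """

    def call_method(self, module_path: str, method_name: str, args: dict) -> dict:
        module = json.dumps(str(Path(module_path).resolve()))
        # Generate a tiny wrapper that imports the module, calls the method
        # with the JSON-encoded arguments, and prints the result as JSON.
        wrapper = (
            f"import {{ {method_name} }} from {module};\n"
            f"const args = {json.dumps(args)};\n"
            f"{method_name}(args)\n"
            "  .then((result: any) => console.log(JSON.stringify(result)))\n"
            "  .catch((err: any) => console.log(JSON.stringify("
            "{ success: false, error: String(err), stack: err?.stack })));\n"
        )
        with tempfile.NamedTemporaryFile("w", suffix=".ts", delete=False) as f:
            f.write(wrapper)
            wrapper_path = f.name
        proc = subprocess.run(
            ["npx", "tsx", wrapper_path], capture_output=True, text=True, check=True
        )
        # The wrapper prints the JSON result as the last line of stdout.
        return json.loads(proc.stdout.strip().splitlines()[-1])

With a bridge along these lines, executor.call_method("./test_context.ts", "testContextEndToEnd", config) would return the same dictionary shape the test script inspects above (success, config, indexStats, searchResults, summary).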

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/zilliztech/claude-context'
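
The same endpoint can also be queried from Python; below is a minimal sketch using the third-party requests package (an assumption, not part of this repository).

import requests

# Fetch this server's entry from the Glama MCP directory API.
response = requests.get(
    "https://glama.ai/api/mcp/v1/servers/zilliztech/claude-context",
    timeout=10,
)
response.raise_for_status()
print(response.json())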

If you have feedback or need assistance with the MCP directory API, please join our Discord server.