# llm_executor.py
from __future__ import annotations
import json
import os
from typing import Any, Dict, List
import openai
from openai.types.chat import ChatCompletion
from adapters import load_adapters
from config import get_settings
from signature import signature_to_schema
settings = get_settings()
openai.api_key = settings.openai_api_key

class LLMExecutor:
    """LLM wrapper that converts natural language into MCP invocations."""

    def __init__(self) -> None:
        self.adapters = load_adapters()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def run(self, prompt: str) -> Dict[str, Any]:
        """Invoke the LLM on *prompt* and return an InvokeRequest-like dict
        that also carries the adapter call's result."""
        functions = self._build_function_schema()
        # ``functions``/``function_call`` are deprecated in favour of
        # ``tools``/``tool_calls``; see the sketch at the end of this file.
        completion: ChatCompletion = openai.chat.completions.create(
            model=settings.openai_model,
            temperature=0,
            messages=[{"role": "user", "content": prompt}],
            functions=functions,
        )
        choice = completion.choices[0]
        if choice.message.function_call is None:  # pragma: no cover
            raise RuntimeError("LLM did not select a function call")
        name = choice.message.function_call.name
        payload = json.loads(choice.message.function_call.arguments)
        # Fully qualified names look like "<adapter>.<method>"; route the
        # call to the owning adapter.
        adapter_name = name.split(".", 1)[0]
        adapter = self.adapters[adapter_name]
        result = adapter.call(name, *payload.get("args", []), **payload.get("kwargs", {}))
        return {"fq_name": name, "args": payload, "result": result}
    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------
    def _build_function_schema(self) -> List[Dict[str, Any]]:
        """Create the OpenAI function schema from adapter methods."""
        functions: List[Dict[str, Any]] = []
        for adapter in self.adapters.values():
            for fq_name, fn in adapter.methods.items():
                # Cap the schema size to stay within the model's token
                # context; methods beyond the limit are dropped.
                if len(functions) >= 2560:
                    return functions
                functions.append(
                    {
                        "name": fq_name,
                        "description": fn.__doc__ or "",
                        # signature_to_schema introspects fn's signature and
                        # builds a JSON Schema for its parameters.
                        "parameters": signature_to_schema(fn),
                    }
                )
        return functions
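
# A minimal sketch of what ``signature_to_schema`` (imported from signature.py)
# might do. The real implementation is not shown in this file, and the naive
# everything-is-a-string type mapping below is an assumption for illustration.
def _signature_to_schema_sketch(fn: Any) -> Dict[str, Any]:
    import inspect

    properties: Dict[str, Any] = {}
    required: List[str] = []
    for name, param in inspect.signature(fn).parameters.items():
        if name == "self":
            continue
        properties[name] = {"type": "string"}  # naive: no real type inference
        if param.default is inspect.Parameter.empty:
            required.append(name)
    return {"type": "object", "properties": properties, "required": required}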

if __name__ == "__main__":  # pragma: no cover
    executor = LLMExecutor()
    print(executor.run("List all namespaces in the cluster"))
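
# Note: OpenAI has deprecated ``functions``/``function_call`` in favour of
# ``tools``/``tool_calls``. The equivalent request would look roughly like the
# sketch below (same schemas, each wrapped as a tool; not wired into the class
# above):
#
#     completion = openai.chat.completions.create(
#         model=settings.openai_model,
#         temperature=0,
#         messages=[{"role": "user", "content": prompt}],
#         tools=[{"type": "function", "function": f} for f in functions],
#     )
#     tool_call = completion.choices[0].message.tool_calls[0]
#     name = tool_call.function.name
#     payload = json.loads(tool_call.function.arguments)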