"""Handle natural language search using LLM."""
import json
from ..data import PatientDataLoader
from ..llm import create_llm_client
def handle_search(patient_id: str, query: str, loader: PatientDataLoader) -> str:
"""Search patient data using natural language via LLM."""
if not loader.patient_exists(patient_id):
return json.dumps({"error": f"Patient {patient_id} not found"})
# Load all patient data
patient_data = loader.get_all_patient_data(patient_id)
# Convert to JSON for the prompt
patient_json = json.dumps(
{
"demographics": patient_data["demographics"].model_dump(mode='json') if patient_data["demographics"] else None,
"allergies": [a.model_dump(mode='json') for a in patient_data["allergies"]],
"medications": [m.model_dump(mode='json') for m in patient_data["medications"]],
"conditions": [c.model_dump(mode='json') for c in patient_data["conditions"]],
"clinical_notes": [n.model_dump(mode='json') for n in patient_data["clinical_notes"]],
"lab_results": [l.model_dump(mode='json') for l in patient_data["lab_results"]],
"vitals": [v.model_dump(mode='json') for v in patient_data["vitals"]],
"procedures": [p.model_dump(mode='json') for p in patient_data["procedures"]],
},
indent=2,
default=str
)
    # Create prompt
    prompt = f"""You are a medical records assistant analyzing patient data.

Based on the following patient medical records, answer the query concisely and accurately.
Only use information present in the data provided. If the information is not available, say so.

Patient Data:
{patient_json}

Query: {query}

Provide a structured, factual response based only on the data provided."""

    try:
        # Initialize LLM client (uses environment variables to determine provider)
        llm_client = create_llm_client()
        response_text = llm_client.generate(prompt)

        return json.dumps({
            "query": query,
            "response": response_text
        }, indent=2)
    except ValueError as e:
        return json.dumps({"error": str(e)})
    except Exception as e:
        return json.dumps({"error": f"LLM API error: {str(e)}"})