"""Code refinement tool - second layer LLM for improving code."""
from pathlib import Path
from .file_ops import read_file, write_file
from utils.llm_client import LLMClient
from utils.diff_generator import generate_diff
def load_prompt(prompt_name: str) -> str:
"""Load a prompt template from the prompts directory."""
prompt_path = Path(__file__).parent.parent / "prompts" / f"{prompt_name}.txt"
with open(prompt_path, 'r') as f:
return f.read()
def refine_code(user_request: str, file_path: str, ai_provider: str = "gemini") -> dict:
"""Refine code based on natural language feedback.
Args:
user_request: Natural language description of desired improvements
file_path: Path to the code file to refine
ai_provider: AI provider to use (gemini, claude, openai)
Returns:
Dictionary with original_code, refined_code, diff, and explanation
"""
# Read the current code
try:
original_code = read_file(file_path)
except Exception as e:
return {"error": f"Failed to read file: {str(e)}"}
# Load prompt template and fill in variables
prompt_template = load_prompt("code_refinement")
prompt = prompt_template.format(
user_request=user_request,
original_code=original_code
)
llm = LLMClient(default_provider=ai_provider)
try:
# Call LLM without JSON mode - using text format instead
print(f"[CODE_REFINEMENT] Calling LLM with provider: {ai_provider}")
response = llm.complete(prompt, provider=ai_provider, temperature=0.3, json_mode=False)
print(f"[CODE_REFINEMENT] Received response, length: {len(response)} chars")
# Save response to file for debugging
import tempfile
debug_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt', prefix='llm_response_')
debug_file.write(response)
debug_file.close()
print(f"[CODE_REFINEMENT] Full response saved to: {debug_file.name}")
# Parse the text format
if "EXPLANATION:" not in response or "REFINED_CODE:" not in response:
print(f"[ERROR] Response missing required markers")
return {
"status": "error",
"error": "LLM response missing EXPLANATION: or REFINED_CODE: markers",
"debug_file": debug_file.name,
"response_preview": response[:500]
}
# Extract explanation
explanation_part = response.split("EXPLANATION:")[1].split("REFINED_CODE:")[0].strip()
# Extract code (between ``` markers)
code_part = response.split("REFINED_CODE:")[1].strip()
if "```" in code_part:
# Remove the ``` markers and optional language tag
code_lines = code_part.split("```")
if len(code_lines) >= 2:
refined_code = code_lines[1].strip()
# Remove language tag if present (e.g., ```java)
if '\n' in refined_code:
first_line = refined_code.split('\n')[0]
if first_line and not first_line.strip().startswith(('#', '//', '/*')):
# First line might be language tag
refined_code = '\n'.join(refined_code.split('\n')[1:])
else:
refined_code = code_part.replace("```", "").strip()
else:
refined_code = code_part.strip()
if not refined_code:
return {
"status": "error",
"error": "Failed to extract refined code from response",
"debug_file": debug_file.name
}
print(f"[CODE_REFINEMENT] Extracted code, length: {len(refined_code)} chars")
print(f"[CODE_REFINEMENT] Explanation: {explanation_part[:100]}...")
# Generate diff
diff = generate_diff(original_code, refined_code, file_path)
explanation = explanation_part
return {
"status": "success",
"original_code": original_code,
"refined_code": refined_code,
"diff": diff,
"explanation": explanation,
"file_path": file_path
}
except Exception as e:
return {
"status": "error",
"error": f"LLM refinement failed: {str(e)}",
"error_type": type(e).__name__
}
def apply_refinement(file_path: str, refined_code: str) -> dict:
"""Apply the refined code to the file.
Args:
file_path: Path to the file to update
refined_code: The refined code to write
Returns:
Status of the operation
"""
try:
write_file(file_path, refined_code)
return {
"status": "success",
"message": f"Successfully updated {file_path}"
}
except Exception as e:
return {
"status": "error",
"error": f"Failed to write file: {str(e)}"
}