# mcp_support_server.py — Adobe Commerce support MCP server
from pathlib import Path
import logging
from mcp.server.fastmcp import FastMCP
logger = logging.getLogger(__name__)
mcp = FastMCP("adobe-support-mcp")
def parse_mixed_content_with_llm(content: str) -> str:
    """Build an LLM prompt that categorizes *content* into three sections.

    The prompt instructs the model to sort free-form support notes under
    ``### Findings`` / ``### Analysis`` / ``### Recommendations`` headers so
    the downstream parsers in this module can consume the result.

    Args:
        content: Free-form, mixed support-engineer notes.

    Returns:
        The categorization prompt string. The caller is expected to run it
        through an LLM; this function performs no categorization itself.
    """
    # FIX: the return annotation was tuple[str, str, str], but the function
    # has always returned a single prompt string (as the docstring says).
    categorization_prompt = f"""
You are an Adobe Commerce Support Engineer. Please analyze the following mixed content and categorize it into three sections:
**CONTENT TO CATEGORIZE:**
{content}
**INSTRUCTIONS:**
1. Extract and organize the content into exactly these three categories:
- **Findings**: Facts, observations, what was discovered, error messages, symptoms
- **Analysis**: Technical explanation, root cause analysis, why the issue occurred
- **Recommendations**: Specific action steps, solutions, next steps to resolve the issue
2. Format your response EXACTLY as follows (use these exact headers):
### Findings
[Put factual findings here - what was observed, discovered, or reported]
### Analysis
[Put technical analysis here - root cause, explanation of why the issue occurred]
### Recommendations
[Put specific action steps and solutions here - what should be done to fix it]
**IMPORTANT:**
- Only include content that was provided in the original text
- Do not add any new suggestions or information not present in the source
- Keep the technical accuracy and specificity of the original content
- If a category has no relevant content, write "No specific information provided."
"""
    return categorization_prompt
def parse_findings(file_path="find.md"):
    """Read *file_path* (resolved against the CWD) and split its text into
    the three ``###``-headed sections.

    Returns:
        Tuple of (findings, analysis, recommendations); each element is
        whitespace-stripped and empty when its header is absent.
    """
    resolved = (Path.cwd() / file_path).resolve()
    logger.info(f"File path: {resolved}")
    raw = resolved.read_text(encoding="utf-8")
    buckets = {"findings": [], "analysis": [], "recommendations": []}
    section = None
    for raw_line in raw.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith("### Findings"):
            section = "findings"
        elif stripped.startswith("### Analysis"):
            section = "analysis"
        elif stripped.startswith("### Recommendations"):
            section = "recommendations"
        elif section is not None:
            buckets[section].append(stripped)
    return (
        "\n".join(buckets["findings"]).strip(),
        "\n".join(buckets["analysis"]).strip(),
        "\n".join(buckets["recommendations"]).strip(),
    )
def parse_structured_content(content: str) -> tuple[str, str, str]:
    """Split already-categorized text into its three sections.

    *content* is expected to use ``### Findings`` / ``### Analysis`` /
    ``### Recommendations`` headers; any text before the first header is
    discarded.

    Returns:
        Tuple of (findings, analysis, recommendations), each
        whitespace-stripped; empty string for a missing section.
    """
    buckets: dict[str, list[str]] = {"findings": [], "analysis": [], "recommendations": []}
    section = None
    for raw_line in content.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith("### Findings"):
            section = "findings"
        elif stripped.startswith("### Analysis"):
            section = "analysis"
        elif stripped.startswith("### Recommendations"):
            section = "recommendations"
        elif section is not None:
            buckets[section].append(stripped)
    return (
        "\n".join(buckets["findings"]).strip(),
        "\n".join(buckets["analysis"]).strip(),
        "\n".join(buckets["recommendations"]).strip(),
    )
@mcp.tool()
def categorize_mixed_content(find_file: str = "find.md", categorized_file: str = "categorized.md") -> str:
    """
    Analyze mixed content from find.md and create an LLM prompt to categorize it into
    Findings, Analysis, and Recommendations sections.
    """
    source_path = (Path.cwd() / find_file).resolve()
    logger.info(f"Reading mixed content from: {source_path}")
    if not source_path.exists():
        return f"Error: File {find_file} not found at {source_path}"
    content = source_path.read_text(encoding="utf-8").strip()
    if not content:
        return f"Error: File {find_file} is empty"
    # If all three section headers are present, no categorization pass is needed.
    required_headers = ("### Findings", "### Analysis", "### Recommendations")
    if all(header in content for header in required_headers):
        return f"Content in {find_file} is already properly categorized. Use generate_support_reply directly."
    # Build the categorization prompt and persist it for LLM processing.
    categorization_prompt = parse_mixed_content_with_llm(content)
    (Path.cwd() / categorized_file).write_text(categorization_prompt, encoding="utf-8")
    return categorization_prompt
@mcp.tool()
def generate_support_reply(find_file: str = "find.md", resp_file: str = "resp.md", auto_categorize: bool = True) -> str:
    """
    Read find.md and generate Adobe Commerce support reply using Cursor LLM.
    If auto_categorize is True, will automatically handle mixed content by creating categorization prompts.
    """
    source_path = (Path.cwd() / find_file).resolve()
    if not source_path.exists():
        return f"Error: File {find_file} not found at {source_path}"
    content = source_path.read_text(encoding="utf-8").strip()
    if not content:
        return f"Error: File {find_file} is empty"
    # All three headers present => content is already categorized.
    required_headers = ("### Findings", "### Analysis", "### Recommendations")
    if all(header in content for header in required_headers):
        findings, analysis, recommendations = parse_structured_content(content)
        # Substitute placeholder text for any empty section.
        findings_text = findings if findings else "No specific findings provided."
        analysis_text = analysis if analysis else "No specific analysis provided."
        recommendations_text = recommendations if recommendations else "No specific recommendations provided."
        response_prompt = f"""
You are an Adobe Commerce Customer Support Engineer.
Generate a professional customer response using ONLY the information provided below. Do not add any additional suggestions or information.
Use this EXACT format:
Hello,
Thank you for reaching out to Adobe Commerce Support. We appreciate your patience while we investigated your request. Below are our findings and recommendations.
### **Findings:**
{findings_text}
### **Analysis:**
{analysis_text}
### **Recommendations / Next Steps:**
{recommendations_text}
If you have any questions or need further clarification, please let us know. We value your partnership and are happy to assist you.
Best regards,
Gihan Kavinga
Adobe Commerce Support Team
"""
        (Path.cwd() / resp_file).write_text(response_prompt, encoding="utf-8")
        return response_prompt
    if not auto_categorize:
        return f"Error: Content in {find_file} is not properly structured. Use auto_categorize=True or the categorize_mixed_content tool first."
    # Mixed content: write a categorization prompt plus follow-up instructions.
    categorization_prompt = parse_mixed_content_with_llm(content)
    (Path.cwd() / "categorize_prompt.md").write_text(categorization_prompt, encoding="utf-8")
    instruction = f"""
STEP 1: Mixed content detected in {find_file}.
I've created a categorization prompt in 'categorize_prompt.md'. Please:
1. Run the LLM on this prompt to categorize the content
2. Save the LLM output as 'categorized_content.md'
3. Then run generate_support_reply with find_file='categorized_content.md'
OR use the categorize_mixed_content tool first, then use the LLM output as your new find_file.
CATEGORIZATION PROMPT:
{categorization_prompt}
"""
    (Path.cwd() / resp_file).write_text(instruction, encoding="utf-8")
    return instruction
@mcp.tool()
def generate_jira_update(find_file: str = "find.md", jira_file: str = "jira_update.md", auto_categorize: bool = True) -> str:
    """
    Generate internal JIRA ticket update from find.md in the specified format:
    - What is the issue? What happened?
    - What is the root cause?
    - What are the recommendations?
    """
    source_path = (Path.cwd() / find_file).resolve()
    if not source_path.exists():
        return f"Error: File {find_file} not found at {source_path}"
    content = source_path.read_text(encoding="utf-8").strip()
    if not content:
        return f"Error: File {find_file} is empty"
    # All three headers present => content is already categorized.
    required_headers = ("### Findings", "### Analysis", "### Recommendations")
    if all(header in content for header in required_headers):
        findings, analysis, recommendations = parse_structured_content(content)
        # Substitute placeholder text for any empty section.
        issue_text = findings if findings else "No specific information provided about the issue."
        cause_text = analysis if analysis else "No specific root cause analysis provided."
        reco_text = recommendations if recommendations else "No specific recommendations provided."
        jira_update = f"""* *What is the issue? What happened?*
{issue_text}
* *What is the root cause?*
{cause_text}
* *What are the recommendations?*
{reco_text}"""
        (Path.cwd() / jira_file).write_text(jira_update, encoding="utf-8")
        return jira_update
    if not auto_categorize:
        return f"Error: Content in {find_file} is not properly structured. Use auto_categorize=True or the categorize_mixed_content tool first."
    # Mixed content: write a categorization prompt plus follow-up instructions.
    categorization_prompt = parse_mixed_content_with_llm(content)
    (Path.cwd() / "jira_categorize_prompt.md").write_text(categorization_prompt, encoding="utf-8")
    instruction = f"""
STEP 1: Mixed content detected in {find_file}.
I've created a categorization prompt in 'jira_categorize_prompt.md'. Please:
1. Run the LLM on this prompt to categorize the content
2. Save the LLM output as 'jira_categorized_content.md'
3. Then run generate_jira_update with find_file='jira_categorized_content.md'
OR use the categorize_mixed_content tool first, then use the LLM output as your new find_file.
CATEGORIZATION PROMPT:
{categorization_prompt}
"""
    (Path.cwd() / jira_file).write_text(instruction, encoding="utf-8")
    return instruction
def main() -> None:
    """Entry point: start the MCP server (blocks until shutdown)."""
    mcp.run()


if __name__ == "__main__":
    main()