# main.py
from fastapi import FastAPI
from sse import create_sse_server
from mcp.server.fastmcp import FastMCP
from openai import AzureOpenAI
import os
import json
import random
import requests
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
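# Environment variables read below via os.getenv (load them from .env):
#   AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION,
#   AZURE_OPENAI_IMAGE_MODEL, PODCAST_WEBHOOK_URL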
app = FastAPI()
# Initialize the SSE MCP server
mcp = FastMCP("Echo")
# Health-check endpoint (registered before the catch-all mount so it stays reachable)
@app.get("/")
def read_root():
    return {"status": "ok"}
# Mount the SSE Starlette app at the root path
app.mount("/", create_sse_server(mcp))
# Azure OpenAI client for image generation
env_api_key = os.getenv("AZURE_OPENAI_API_KEY")
env_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
client = AzureOpenAI(
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    api_key=env_api_key,
    azure_endpoint=env_endpoint
)
# Echo resource
@mcp.resource("echo://{message}")
def echo_resource(message: str) -> str:
"""
An echo resource that returns the message passed in the URL.
"""
return f"Resource echo: {message}"
# Echo tool
@mcp.tool()
def echo_tool(message: str) -> str:
"""
An echo tool that returns the message passed in the request.
"""
return f"Tool echo: {message}"
# Echo prompt
@mcp.prompt()
def echo_prompt(message: str) -> str:
"""
An echo prompt that returns the message passed in the request.
"""
return f"Please process this message: {message}"
# Think Tool: a tool to think before taking any action
@mcp.tool()
def think_tool(input: str) -> str:
"""
A tool to think before taking any action. Returns a thoughtful reflection on the input.
"""
return f"Thinking about: {input}"
# Random Number Generator Tool
@mcp.tool()
def random_number(min_value: int = 0, max_value: int = 100) -> int:
"""
Generates a random integer between min_value and max_value (inclusive).
"""
return random.randint(min_value, max_value)
# Image Generation Tool
@mcp.tool()
def generate_image(prompt: str) -> str:
"""
Generates an image based on the given prompt using Azure OpenAI DALL-E and returns the image URL.
"""
result = client.images.generate(
model=os.getenv("AZURE_OPENAI_IMAGE_MODEL"),
prompt=prompt,
n=1
)
json_response = json.loads(result.model_dump_json())
return json_response["data"][0]["url"]
# Podcast Generation Tool
@mcp.tool()
def generate_podcast(
    prompt: str,
    duration: int = 3,
    name1: str = "",
    voice1: str = "",
    age1: int | None = None,
    first_person_quality1: str = "",
    first_person_quality2: str = "",
    gender1: str = "",
    name2: str = "",
    voice2: str = "",
    age2: int | None = None,
    second_person_quality1: str = "",
    second_person_quality2: str = "",
    gender2: str = "",
    generate_text: bool = True
) -> dict:
"""
AI Prompt:
Generate a {duration}-minute podcast on the topic: "{prompt}".
Host:
- Name: {name1}
- Voice: {voice1}
- Qualities: {first_person_quality1}, {first_person_quality2}
- Gender: {gender1} (Must be 'M' or 'F')
- Age: {age1}
Guest:
- Name: {name2}
- Voice: {voice2}
- Qualities: {second_person_quality1}, {second_person_quality2}
- Gender: {gender2} (Must be 'M' or 'F')
- Age: {age2}
Available voices include:
Rachel, Drew, Clyde, Paul, Dave, Domi, Fin, Sarah,
Antoni, Thomas, Charlie, George, Emily, Alice,
Matilda, James, Joseph, Jeremy, Michael,
Liam, Dorothy, Callum, Patrick, Glinda,
Giovanni, Mimi, Freya, Brian.
Return the generated script and audio URLs in JSON format.
"""
    # Prepare the webhook URL
    webhook_url = os.getenv("PODCAST_WEBHOOK_URL")
    # Prepare the payload with the provided format
    payload = {
        "generate_text": generate_text,
        "first_char_name": name1 or "Mark",
        "first_char_gender": gender1 or "M",
        "first_char_age": age1 or 34,
        "first_char_voicename": voice1 or "Thomas",
        "second_char_name": name2 or "Sophia",
        "second_char_gender": gender2 or "F",
        "second_char_age": age2 or 36,
        "second_char_voicename": voice2 or "Emily",
        "first_person_quality1": first_person_quality1 or "authoritative",
        "first_person_quality2": first_person_quality2 or "insightful",
        "second_person_quality1": second_person_quality1 or "analytical",
        "second_person_quality2": second_person_quality2 or "strategic"
    }
    # Add prompt and duration to the payload
    payload["prompt"] = prompt
    payload["duration"] = duration
    headers = {
        "accept": "application/json",
        "Content-Type": "application/json"
    }
    # Make the request to the webhook
    response = requests.post(webhook_url, headers=headers, json=payload)
    response.raise_for_status()
    return response.json()
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
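# Equivalently, the server can be started from the command line:
#   uvicorn main:app --host 0.0.0.0 --port 8000 --reload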