# schemas.py
from typing import List, Optional, Literal
from pydantic import BaseModel, Field


class CleanPromptInput(BaseModel):
    """Input schema for the clean_prompt tool"""

    raw_prompt: str = Field(description="The user's raw, unpolished prompt")
    context: str = Field(default="", description="Additional context about the task")
    mode: Literal["code", "general"] = "general"
    temperature: Optional[float] = Field(default=0.2, ge=0.0, le=1.0)


class QualityScore(BaseModel):
    """Quality assessment of the cleaned prompt"""

    score: int = Field(ge=1, le=5, description="Quality score from 1-5")
    reasons: List[str] = Field(
        default_factory=list, description="Reasons for the score"
    )


class CleanPromptOutput(BaseModel):
    """Output schema for the clean_prompt tool"""

    cleaned: str = Field(description="The enhanced and cleaned prompt")
    notes: List[str] = Field(
        default_factory=list, description="Notes about the cleaning process"
    )
    open_questions: List[str] = Field(
        default_factory=list, description="Open questions about the prompt"
    )
    risks: List[str] = Field(
        default_factory=list, description="Potential risks or issues identified"
    )
    unchanged: bool = Field(
        default=False, description="Whether the prompt was already excellent"
    )
    quality: Optional[QualityScore] = Field(
        default=None, description="Quality assessment of the cleaned prompt"
    )