# server.py
"""MCP server implementation."""
import asyncio
from contextlib import asynccontextmanager
from dataclasses import dataclass
from textwrap import dedent
from typing import Annotated, AsyncIterator, Literal, Optional, cast
from atla import AsyncAtla
from mcp.server.fastmcp import Context, FastMCP
from pydantic import WithJsonSchema
# config
@dataclass
class MCPState:
"""State of the MCP server."""
atla_client: AsyncAtla
# types
AnnotatedLlmPrompt = Annotated[
str,
WithJsonSchema(
{
"description": dedent(
"""The prompt given to an LLM to generate the `llm_response` to be \
evaluated."""
),
"examples": [
"What is the capital of the moon?",
"Explain the difference between supervised and unsupervised learning.",
"Can you summarize the main idea behind transformers in NLP?",
],
}
),
]
AnnotatedLlmResponse = Annotated[
str,
WithJsonSchema(
{
"description": dedent(
"""The output generated by the model in response to the `llm_prompt`, \
which needs to be evaluated."""
),
"examples": [
dedent(
"""The Moon doesn't have a capital — it has no countries, \
governments, or permanent residents"""
),
dedent(
"""Supervised learning uses labeled data to train models to make \
predictions or classifications. Unsupervised learning, on the other \
hand, works with unlabeled data to uncover hidden patterns or \
groupings, such as through clustering or dimensionality reduction."""
),
dedent(
"""Transformers are neural network architectures designed for \
sequence modeling tasks like NLP. They rely on self-attention \
mechanisms to weigh the importance of different input tokens, \
enabling parallel processing of input data. Unlike RNNs, they don't \
process sequentially, which allows for faster training and better \
handling of long-range dependencies."""
),
],
}
),
]
AnnotatedEvaluationCriteria = Annotated[
str,
WithJsonSchema(
{
"description": dedent(
"""The specific criteria or instructions on which to evaluate the \
model output. A good evaluation criteria should provide the model \
with: (1) a description of the evaluation task, (2) a rubric of \
possible scores and their corresponding criteria, and (3) a \
final sentence clarifying expected score format. A good evaluation \
criteria should also be specific and focus on a single aspect of \
the model output. To evaluate a model's response on multiple \
criteria, use the `evaluate_llm_response_on_multiple_criteria` \
function and create individual criteria for each relevant evaluation \
task. Typical rubrics score responses either on a Likert scale from \
1 to 5 or binary scale with scores of 'Yes' or 'No', depending on \
the specific evaluation task."""
),
"examples": [
dedent(
"""Evaluate how well the response fulfills the requirements of the instruction by providing relevant information. This includes responding in accordance with the explicit and implicit purpose of given instruction.
Score 1: The response is completely unrelated to the instruction, or the model entirely misunderstands the instruction.
Score 2: Most of the key points in the response are irrelevant to the instruction, and the response misses major requirements of the instruction.
Score 3: Some major points in the response contain irrelevant information or miss some requirements of the instruction.
Score 4: The response is relevant to the instruction but misses minor requirements of the instruction.
Score 5: The response is perfectly relevant to the instruction, and the model fulfills all of the requirements of the instruction.
Your score should be an integer between 1 and 5.""" # noqa: E501
),
dedent(
"""Evaluate whether the information provided in the response is correct given the reference response.
Ignore differences in punctuation and phrasing between the response and reference response.
It is okay if the response contains more information than the reference response, as long as it does not contain any conflicting statements.
Binary scoring
"No": The response is not factually accurate when compared against the reference response or includes conflicting statements.
"Yes": The response is supported by the reference response and does not contain conflicting statements.
Your score should be either "No" or "Yes".
""" # noqa: E501
),
],
}
),
]
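
# A minimal sketch of composing a custom criterion along the lines described above:
# a task description, a rubric of scores, and a closing sentence that fixes the score
# format. The "conciseness" criterion below is purely illustrative and is not defined
# or used anywhere in this module:
#
#     conciseness_criterion = (
#         "Evaluate whether the response answers the prompt directly and concisely, "
#         "without unnecessary repetition or filler.\n"
#         "Score 1: The response is dominated by repetition, filler, or off-topic content.\n"
#         "Score 3: The response answers the prompt but contains noticeable redundancy.\n"
#         "Score 5: The response is direct, complete, and free of unnecessary content.\n"
#         "Your score should be an integer between 1 and 5."
#     )
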
AnnotatedExpectedLlmOutput = Annotated[
Optional[str],
WithJsonSchema(
{
"description": dedent(
"""A reference or ideal answer to compare against the `llm_response`. \
This is useful in cases where a specific output is expected from \
the model. Defaults to None."""
)
}
),
]
AnnotatedLlmContext = Annotated[
Optional[str],
WithJsonSchema(
{
"description": dedent(
"""Additional context or information provided to the model during \
generation. This is useful in cases where the model was provided \
with additional information that is not part of the `llm_prompt` \
or `expected_llm_output` (e.g., a RAG retrieval context). \
Defaults to None."""
)
}
),
]
AnnotatedModelId = Annotated[
Literal["atla-selene", "atla-selene-mini"],
WithJsonSchema(
{
"description": dedent(
"""The Atla model ID to use for evaluation. `atla-selene` is the \
flagship Atla model, optimized for the highest all-round performance. \
`atla-selene-mini` is a compact model that is generally faster and \
cheaper to run. Defaults to `atla-selene`."""
)
}
),
]
# tools
async def evaluate_llm_response(
    ctx: Context,
    evaluation_criteria: AnnotatedEvaluationCriteria,
    llm_prompt: AnnotatedLlmPrompt,
    llm_response: AnnotatedLlmResponse,
    expected_llm_output: AnnotatedExpectedLlmOutput = None,
    llm_context: AnnotatedLlmContext = None,
    model_id: AnnotatedModelId = "atla-selene",
) -> dict[str, str]:
    """Evaluate an LLM's response to a prompt against a given evaluation criterion.

    This function uses an Atla evaluation model under the hood to return a dictionary
    containing a score for the model's response and a textual critique with feedback
    on the response.

    Returns:
        dict[str, str]: A dictionary containing the evaluation score and critique, in
            the format `{"score": <score>, "critique": <critique>}`.
    """
    state = cast(MCPState, ctx.request_context.lifespan_context)

    result = await state.atla_client.evaluation.create(
        model_id=model_id,
        model_input=llm_prompt,
        model_output=llm_response,
        evaluation_criteria=evaluation_criteria,
        expected_model_output=expected_llm_output,
        model_context=llm_context,
    )

    return {
        "score": result.result.evaluation.score,
        "critique": result.result.evaluation.critique,
    }
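
# A hedged sketch of how an MCP client might invoke the tool above once the server is
# running. `session` is assumed to be an already-initialized
# `mcp.client.session.ClientSession` connected to this server, and the argument values
# are placeholders:
#
#     result = await session.call_tool(
#         "evaluate_llm_response",
#         arguments={
#             "evaluation_criteria": "...",  # a rubric string, as described above
#             "llm_prompt": "What is the capital of the moon?",
#             "llm_response": "The Moon doesn't have a capital.",
#         },
#     )
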
async def evaluate_llm_response_on_multiple_criteria(
    ctx: Context,
    evaluation_criteria_list: list[AnnotatedEvaluationCriteria],
    llm_prompt: AnnotatedLlmPrompt,
    llm_response: AnnotatedLlmResponse,
    expected_llm_output: AnnotatedExpectedLlmOutput = None,
    llm_context: AnnotatedLlmContext = None,
    model_id: AnnotatedModelId = "atla-selene",
) -> list[dict[str, str]]:
    """Evaluate an LLM's response to a prompt across *multiple* evaluation criteria.

    This function uses an Atla evaluation model under the hood to return a list of
    dictionaries, each containing an evaluation score and critique for a single
    criterion.

    Returns:
        list[dict[str, str]]: A list of dictionaries containing the evaluation score
            and critique, in the format `{"score": <score>, "critique": <critique>}`.
            The order of the dictionaries in the list matches the order of the
            criteria in the `evaluation_criteria_list` argument.
    """
    tasks = [
        evaluate_llm_response(
            ctx=ctx,
            evaluation_criteria=criterion,
            llm_prompt=llm_prompt,
            llm_response=llm_response,
            expected_llm_output=expected_llm_output,
            llm_context=llm_context,
            model_id=model_id,
        )
        for criterion in evaluation_criteria_list
    ]
    results = await asyncio.gather(*tasks)
    return results
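
# A sketch of a multi-criteria call, reusing the hypothetical `session` from the
# example above. `relevance_criterion` and `accuracy_criterion` stand in for two
# rubric strings; the returned evaluations preserve the order of the criteria list:
#
#     multi_result = await session.call_tool(
#         "evaluate_llm_response_on_multiple_criteria",
#         arguments={
#             "evaluation_criteria_list": [relevance_criterion, accuracy_criterion],
#             "llm_prompt": "What is the capital of the moon?",
#             "llm_response": "The Moon doesn't have a capital.",
#         },
#     )
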
# app factory
def app_factory(atla_api_key: str) -> FastMCP:
    """Factory function to create an Atla MCP server with the given API key."""

    @asynccontextmanager
    async def lifespan(_: FastMCP) -> AsyncIterator[MCPState]:
        async with AsyncAtla(
            api_key=atla_api_key,
            default_headers={
                "X-Atla-Source": "mcp-server",
            },
        ) as client:
            yield MCPState(atla_client=client)

    mcp = FastMCP("Atla", lifespan=lifespan)

    mcp.tool()(evaluate_llm_response)
    mcp.tool()(evaluate_llm_response_on_multiple_criteria)

    return mcp
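
# A minimal entrypoint sketch (not part of the published module): assuming the Atla
# API key is exposed via an `ATLA_API_KEY` environment variable, the server could be
# run over stdio with FastMCP's built-in runner:
#
#     if __name__ == "__main__":
#         import os
#
#         app_factory(atla_api_key=os.environ["ATLA_API_KEY"]).run(transport="stdio")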