"""
Tests for Pydantic models used in the thinking augmentation system.
This module contains comprehensive tests for all data models including
CritiqueRequest, CritiqueResponse, SynthesisResponse, and related models.
"""
from typing import get_args

import pytest
from pydantic import ValidationError
from elrond_mcp.models import (
ConsensusView,
CritiqueAnalysis,
CritiquePerspective,
CritiqueRequest,
CritiqueResponse,
SynthesisResponse,
ThinkingAugmentationResult,
)
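# For orientation, the constraints exercised below imply model definitions
# roughly like the following sketch (the authoritative definitions live in
# elrond_mcp.models; field names and bounds here are inferred from the tests):
#
#     CritiquePerspective = Literal["positive", "neutral", "negative"]
#
#     class CritiqueRequest(BaseModel):
#         proposal: str = Field(min_length=10)
#
#     class CritiqueResponse(BaseModel):
#         perspective: CritiquePerspective
#         executive_summary: str
#         analysis: CritiqueAnalysis
#         key_insights: list[str] = Field(min_length=3, max_length=5)
#         confidence_level: float = Field(ge=0.0, le=1.0)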
class TestCritiquePerspective:
"""Test the CritiquePerspective literal type."""
    def test_literal_values(self):
        """Test that the literal type allows exactly the expected values."""
        # CritiquePerspective is a Literal alias, so its allowed values can be
        # inspected with typing.get_args rather than re-asserted by hand.
        assert set(get_args(CritiquePerspective)) == {
            "positive",
            "neutral",
            "negative",
        }
def test_literal_type_annotation(self):
"""Test that CritiquePerspective is properly defined."""
# This will be validated at runtime when creating CritiqueResponse objects
# CritiquePerspective is now a type alias, not a class
assert CritiquePerspective is not None
class TestCritiqueRequest:
"""Test the CritiqueRequest model."""
def test_valid_request(self):
"""Test creating a valid critique request."""
proposal = "This is a valid proposal with sufficient length for testing."
request = CritiqueRequest(proposal=proposal)
assert request.proposal == proposal
def test_empty_proposal_fails(self):
"""Test that empty proposal fails validation."""
with pytest.raises(ValidationError) as exc_info:
CritiqueRequest(proposal="")
assert "at least 10 characters" in str(exc_info.value)
def test_short_proposal_fails(self):
"""Test that proposal with less than 10 characters fails."""
with pytest.raises(ValidationError) as exc_info:
CritiqueRequest(proposal="short")
assert "at least 10 characters" in str(exc_info.value)
def test_minimum_length_proposal(self):
"""Test that proposal with exactly 10 characters passes."""
request = CritiqueRequest(proposal="0123456789")
assert request.proposal == "0123456789"
class TestCritiqueAnalysis:
"""Test the CritiqueAnalysis model."""
def test_valid_analysis(self):
"""Test creating a valid critique analysis."""
analysis = CritiqueAnalysis(
feasibility="Highly feasible with current resources",
risks="Low risk with proper planning",
benefits="Significant benefits expected",
implementation="Straightforward implementation path",
stakeholder_impact="Positive impact on all stakeholders",
resource_requirements="Moderate resource requirements",
)
assert analysis.feasibility == "Highly feasible with current resources"
assert analysis.risks == "Low risk with proper planning"
assert analysis.benefits == "Significant benefits expected"
assert analysis.implementation == "Straightforward implementation path"
assert analysis.stakeholder_impact == "Positive impact on all stakeholders"
assert analysis.resource_requirements == "Moderate resource requirements"
def test_missing_fields_fail(self):
"""Test that missing required fields fail validation."""
with pytest.raises(ValidationError) as exc_info:
CritiqueAnalysis(
feasibility="Feasible",
risks="Low risk",
benefits="Good benefits",
# Missing implementation, stakeholder_impact, resource_requirements
) # type:ignore
assert "Field required" in str(exc_info.value)
class TestCritiqueResponse:
"""Test the CritiqueResponse model."""
def get_valid_critique_data(self):
"""Get valid data for creating CritiqueResponse."""
return {
"perspective": "positive",
"executive_summary": (
"This is a positive view of the proposal with strong potential."
),
"analysis": CritiqueAnalysis(
feasibility="Highly feasible with current resources",
risks="Low risk with proper planning",
benefits="Significant benefits expected",
implementation="Straightforward implementation path",
stakeholder_impact="Positive impact on all stakeholders",
resource_requirements="Moderate resource requirements",
),
"key_insights": [
"Strong alignment with organizational goals",
"Clear implementation pathway identified",
"Significant positive ROI expected",
],
"confidence_level": 0.85,
}
def test_valid_critique_response(self):
"""Test creating a valid critique response."""
data = self.get_valid_critique_data()
response = CritiqueResponse(**data)
assert response.perspective == "positive"
assert response.executive_summary == data["executive_summary"]
assert response.analysis == data["analysis"]
assert response.key_insights == data["key_insights"]
assert response.confidence_level == data["confidence_level"]
def test_confidence_level_constraints(self):
"""Test confidence level validation constraints."""
data = self.get_valid_critique_data()
# Test below range
data["confidence_level"] = -0.1
with pytest.raises(ValidationError) as exc_info:
CritiqueResponse(**data)
assert "greater than or equal to 0" in str(exc_info.value)
# Test above range
data["confidence_level"] = 1.1
with pytest.raises(ValidationError) as exc_info:
CritiqueResponse(**data)
assert "less than or equal to 1" in str(exc_info.value)
# Test valid boundaries
data["confidence_level"] = 0.0
response = CritiqueResponse(**data)
assert response.confidence_level == 0.0
data["confidence_level"] = 1.0
response = CritiqueResponse(**data)
assert response.confidence_level == 1.0
def test_key_insights_length_constraints(self):
"""Test key insights length validation."""
data = self.get_valid_critique_data()
# Test too few insights
data["key_insights"] = ["Only one insight"]
with pytest.raises(ValidationError) as exc_info:
CritiqueResponse(**data)
assert "at least 3 items" in str(exc_info.value)
# Test too many insights
data["key_insights"] = [
"Insight 1",
"Insight 2",
"Insight 3",
"Insight 4",
"Insight 5",
"Insight 6",
]
with pytest.raises(ValidationError) as exc_info:
CritiqueResponse(**data)
assert "at most 5 items" in str(exc_info.value)
# Test valid boundaries
data["key_insights"] = ["Insight 1", "Insight 2", "Insight 3"]
response = CritiqueResponse(**data)
assert len(response.key_insights) == 3
data["key_insights"] = [
"Insight 1",
"Insight 2",
"Insight 3",
"Insight 4",
"Insight 5",
]
response = CritiqueResponse(**data)
assert len(response.key_insights) == 5
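    def test_invalid_perspective_fails(self):
        """Sketch: a value outside the CritiquePerspective literal should fail.

        This assumes perspective is validated against the Literal values
        ("positive", "neutral", "negative"), so any other string is rejected.
        """
        data = self.get_valid_critique_data()
        data["perspective"] = "ambivalent"  # not a valid CritiquePerspective
        with pytest.raises(ValidationError):
            CritiqueResponse(**data)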
class TestConsensusView:
"""Test the ConsensusView model."""
def test_valid_consensus_view(self):
"""Test creating a valid consensus view."""
consensus = ConsensusView(
areas_of_agreement=[
"All perspectives agree on feasibility",
"Strong potential for positive outcomes",
],
areas_of_disagreement=[
"Timeline estimates vary significantly",
"Resource requirements disputed",
],
balanced_assessment="Overall positive with some implementation concerns",
critical_considerations=[
"Resource allocation planning",
"Timeline management",
"Stakeholder communication",
],
)
assert len(consensus.areas_of_agreement) == 2
assert len(consensus.areas_of_disagreement) == 2
assert len(consensus.critical_considerations) == 3
assert "Overall positive" in consensus.balanced_assessment
class TestSynthesisResponse:
"""Test the SynthesisResponse model."""
def test_valid_synthesis_response(self):
"""Test creating a valid synthesis response."""
synthesis = SynthesisResponse(
executive_summary=(
"Comprehensive analysis shows positive potential with manageable risks"
),
consensus_view=ConsensusView(
areas_of_agreement=["Feasible proposal", "Good ROI potential"],
areas_of_disagreement=["Implementation timeline", "Resource needs"],
balanced_assessment="Positive overall with implementation concerns",
critical_considerations=["Resource planning", "Timeline management"],
),
recommendation="Proceed with phased implementation approach",
next_steps=[
"Conduct detailed resource planning",
"Develop implementation timeline",
"Engage key stakeholders",
],
uncertainty_flags=[
"Market conditions may affect timeline",
"Resource availability uncertain",
],
overall_confidence=0.75,
)
assert "positive potential" in synthesis.executive_summary
assert synthesis.recommendation == "Proceed with phased implementation approach"
assert len(synthesis.next_steps) == 3
assert len(synthesis.uncertainty_flags) == 2
assert synthesis.overall_confidence == 0.75
def test_empty_next_steps_fails(self):
"""Test that empty next_steps fails validation."""
with pytest.raises(ValidationError) as exc_info:
SynthesisResponse(
executive_summary="Test summary",
consensus_view=ConsensusView(
areas_of_agreement=["Agreement"],
areas_of_disagreement=["Disagreement"],
balanced_assessment="Balanced",
critical_considerations=["Critical"],
),
recommendation="Test recommendation",
next_steps=[], # Empty list should fail
overall_confidence=0.8,
)
assert "at least 1 item" in str(exc_info.value)
def test_uncertainty_flags_optional(self):
"""Test that uncertainty_flags is optional."""
synthesis = SynthesisResponse(
executive_summary="Test summary",
consensus_view=ConsensusView(
areas_of_agreement=["Agreement"],
areas_of_disagreement=["Disagreement"],
balanced_assessment="Balanced",
critical_considerations=["Critical"],
),
recommendation="Test recommendation",
next_steps=["Step 1"],
overall_confidence=0.8,
# No uncertainty_flags provided
)
assert synthesis.uncertainty_flags == []
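    def test_overall_confidence_boundaries(self):
        """Sketch: boundary confidence values should be accepted.

        The other tests only use values strictly between 0 and 1; this assumes
        overall_confidence accepts the 0.0 and 1.0 boundaries, mirroring
        CritiqueResponse.confidence_level.
        """
        base_kwargs = dict(
            executive_summary="Test summary",
            consensus_view=ConsensusView(
                areas_of_agreement=["Agreement"],
                areas_of_disagreement=["Disagreement"],
                balanced_assessment="Balanced",
                critical_considerations=["Critical"],
            ),
            recommendation="Test recommendation",
            next_steps=["Step 1"],
        )
        for boundary in (0.0, 1.0):
            synthesis = SynthesisResponse(
                overall_confidence=boundary, **base_kwargs
            )
            assert synthesis.overall_confidence == boundary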
class TestThinkingAugmentationResult:
"""Test the ThinkingAugmentationResult model."""
def get_valid_result_data(self):
"""Get valid data for creating ThinkingAugmentationResult."""
critiques = []
for perspective in ["positive", "neutral", "negative"]:
critique = CritiqueResponse(
perspective=perspective, # type:ignore
executive_summary=f"This is a {perspective} perspective",
analysis=CritiqueAnalysis(
feasibility="Analysis of feasibility",
risks="Risk analysis",
benefits="Benefits analysis",
implementation="Implementation analysis",
stakeholder_impact="Stakeholder impact analysis",
resource_requirements="Resource requirements analysis",
),
key_insights=[
f"{perspective.title()} insight 1",
f"{perspective.title()} insight 2",
f"{perspective.title()} insight 3",
],
confidence_level=0.8,
)
critiques.append(critique)
return {
"original_proposal": "Test proposal for comprehensive analysis",
"critiques": critiques,
"synthesis": SynthesisResponse(
executive_summary="Comprehensive synthesis of all perspectives",
consensus_view=ConsensusView(
areas_of_agreement=["Generally feasible"],
areas_of_disagreement=["Implementation approach"],
balanced_assessment="Mixed but generally positive",
critical_considerations=["Resource planning"],
),
recommendation="Proceed with caution",
next_steps=["Plan implementation"],
overall_confidence=0.75,
),
"processing_metadata": {
"processing_time": "5.2 seconds",
"model_version": "test-model",
},
}
def test_valid_thinking_augmentation_result(self):
"""Test creating a valid thinking augmentation result."""
data = self.get_valid_result_data()
result = ThinkingAugmentationResult(**data)
assert result.original_proposal == data["original_proposal"]
assert len(result.critiques) == 3
assert result.synthesis == data["synthesis"]
assert result.processing_metadata == data["processing_metadata"]
def test_wrong_number_of_critiques_fails(self):
"""Test that wrong number of critiques fails validation."""
data = self.get_valid_result_data()
# Test too few critiques
data["critiques"] = data["critiques"][:2] # Only 2 critiques
with pytest.raises(ValidationError) as exc_info:
ThinkingAugmentationResult(**data)
assert "at least 3 items" in str(exc_info.value)
# Test too many critiques
data = self.get_valid_result_data()
extra_critique = CritiqueResponse(
perspective="positive",
executive_summary="Extra critique",
analysis=CritiqueAnalysis(
feasibility="Extra",
risks="Extra",
benefits="Extra",
implementation="Extra",
stakeholder_impact="Extra",
resource_requirements="Extra",
),
key_insights=["Extra 1", "Extra 2", "Extra 3"],
confidence_level=0.8,
)
data["critiques"].append(extra_critique) # 4 critiques
with pytest.raises(ValidationError) as exc_info:
ThinkingAugmentationResult(**data)
assert "at most 3 items" in str(exc_info.value)
def test_processing_metadata_optional(self):
"""Test that processing_metadata is optional."""
data = self.get_valid_result_data()
del data["processing_metadata"]
result = ThinkingAugmentationResult(**data)
assert result.processing_metadata == {}
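    def test_critique_perspectives_preserved(self):
        """Sketch: the three critiques built by the helper keep their perspectives.

        This only checks that the data round-trips through the model in order;
        it does not assume the model enforces one critique per perspective.
        """
        data = self.get_valid_result_data()
        result = ThinkingAugmentationResult(**data)
        assert [c.perspective for c in result.critiques] == [
            "positive",
            "neutral",
            "negative",
        ]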