# test_peer_comparison_analyzer.py (13.6 kB)
"""
Test cases for PeerComparisonAnalyzer class (TDD Red Phase)
"""
import pytest
from unittest.mock import AsyncMock, patch
from typing import Dict, Any
# Mock the module before importing
import sys
from unittest.mock import MagicMock
sys.modules['src.tools.peer_comparison_tools'] = MagicMock()
from src.tools.peer_comparison_tools import PeerComparisonAnalyzer
from src.exceptions import InsufficientDataError
class TestPeerComparisonAnalyzer:
    """Test cases for PeerComparisonAnalyzer (TDD red phase).

    Each test exercises one public coroutine of the analyzer and asserts the
    *shape* of the returned result (required keys, container types, value
    ranges) rather than exact values, since the real implementation does not
    exist yet at this stage.

    NOTE(review): the module under test is replaced with a plain MagicMock at
    import time (see module top); awaiting its methods will likely fail until
    the real async implementation lands — presumably intentional for the red
    phase, but confirm once the implementation exists.
    """

    @pytest.fixture
    def peer_analyzer(self):
        """Create a PeerComparisonAnalyzer instance under test."""
        # PeerComparisonAnalyzer is already imported at module scope (after
        # the module mock is installed); the in-fixture re-import that used
        # to live here was redundant and has been removed.
        return PeerComparisonAnalyzer()

    @pytest.fixture
    def sample_company_data(self) -> Dict[str, Any]:
        """Target-company fixture: FY2023 fundamentals for code 005930."""
        return {
            "company_code": "005930",
            "company_name": "μΌμ±μ μ",
            "market_cap": 450000000000000,
            "industry": "λ°λ체",
            "sector": "μ 보기μ ",
            "financial_metrics": {
                "revenue_2023": 302231000000000,
                "operating_profit_2023": 46007000000000,
                "net_income_2023": 26016000000000,
                "total_assets": 426950000000000,
                "total_equity": 302777000000000,
                "operating_margin": 15.2,
                "net_margin": 8.6,
                "roe": 8.6,
                "roa": 6.1,
                "debt_to_equity": 0.41
            }
        }

    @pytest.fixture
    def sample_peer_data(self) -> list:
        """Peer-company fixtures: one same-industry and one cross-industry peer."""
        return [
            {
                "company_code": "000660",
                "company_name": "SKνμ΄λμ€",
                "market_cap": 95000000000000,
                "industry": "λ°λ체",
                "financial_metrics": {
                    "revenue_2023": 55754000000000,
                    "operating_profit_2023": 1806000000000,
                    "net_income_2023": 8092000000000,
                    "operating_margin": 3.2,
                    "net_margin": 14.5,
                    "roe": 12.8,
                    "roa": 8.2
                }
            },
            {
                "company_code": "035420",
                "company_name": "NAVER",
                "market_cap": 45000000000000,
                "industry": "μΈν°λ·μλΉμ€",
                "financial_metrics": {
                    "revenue_2023": 8753000000000,
                    "operating_profit_2023": 1521000000000,
                    "net_income_2023": 2172000000000,
                    "operating_margin": 17.4,
                    "net_margin": 24.8,
                    "roe": 15.2,
                    "roa": 11.8
                }
            }
        ]

    @pytest.mark.asyncio
    async def test_identify_peer_companies(self, peer_analyzer, sample_company_data):
        """Peer identification returns at most max_peers scored candidates."""
        peer_companies = await peer_analyzer.identify_peer_companies(
            company_data=sample_company_data,
            selection_method="industry_focused",
            max_peers=5
        )
        assert peer_companies is not None
        assert isinstance(peer_companies, dict)
        # Should contain peer identification results
        assert "peer_companies" in peer_companies
        assert "selection_criteria" in peer_companies
        assert "similarity_scores" in peer_companies
        # Should identify relevant peers, honoring the max_peers cap
        peer_list = peer_companies["peer_companies"]
        assert len(peer_list) <= 5
        # Each peer entry must carry identity plus a similarity score
        for peer in peer_list:
            assert "company_code" in peer
            assert "company_name" in peer
            assert "similarity_score" in peer

    @pytest.mark.asyncio
    async def test_compare_financial_metrics(self, peer_analyzer, sample_company_data, sample_peer_data):
        """Financial comparison reports per-metric company vs. peer statistics."""
        comparison = await peer_analyzer.compare_financial_metrics(
            company_data=sample_company_data,
            peer_data=sample_peer_data,
            metrics=["revenue", "operating_margin", "roe", "roa"]
        )
        assert comparison is not None
        assert isinstance(comparison, dict)
        # Should contain financial comparison
        assert "financial_comparison" in comparison
        assert "peer_statistics" in comparison
        assert "percentile_ranks" in comparison
        # Check specific metrics
        financial_comp = comparison["financial_comparison"]
        assert "operating_margin" in financial_comp
        assert "roe" in financial_comp
        # Each compared metric must expose company value, peer average/median
        # and the company's percentile among peers
        for metric in ["operating_margin", "roe"]:
            metric_data = financial_comp[metric]
            assert "company_value" in metric_data
            assert "peer_average" in metric_data
            assert "peer_median" in metric_data
            assert "percentile" in metric_data

    @pytest.mark.asyncio
    async def test_analyze_valuation_metrics(self, peer_analyzer, sample_company_data, sample_peer_data):
        """Valuation analysis yields relative premium/discount per multiple."""
        valuation_analysis = await peer_analyzer.analyze_valuation_metrics(
            company_data=sample_company_data,
            peer_data=sample_peer_data
        )
        assert valuation_analysis is not None
        assert isinstance(valuation_analysis, dict)
        # Should contain valuation analysis
        assert "valuation_comparison" in valuation_analysis
        assert "relative_valuation" in valuation_analysis
        # Check valuation multiples; a metric may be absent (e.g. missing
        # input data), so each is only validated when present
        valuation_comp = valuation_analysis["valuation_comparison"]
        expected_metrics = ["per", "pbr", "ev_ebitda", "price_to_sales"]
        for metric in expected_metrics:
            if metric in valuation_comp:
                metric_data = valuation_comp[metric]
                assert "company_value" in metric_data
                assert "peer_average" in metric_data
                assert "relative_premium_discount" in metric_data

    @pytest.mark.asyncio
    async def test_calculate_market_position(self, peer_analyzer, sample_company_data, sample_peer_data):
        """Market-position result includes rankings among the peer group."""
        market_position = await peer_analyzer.calculate_market_position(
            company_data=sample_company_data,
            peer_data=sample_peer_data
        )
        assert market_position is not None
        assert isinstance(market_position, dict)
        # Should contain market position metrics
        assert "market_position" in market_position
        assert "competitive_ranking" in market_position
        # Check ranking metrics
        ranking = market_position["competitive_ranking"]
        assert "revenue_rank" in ranking
        assert "market_cap_rank" in ranking
        assert "profitability_rank" in ranking
        # Competitive insights are optional but must be a list when present
        if "competitive_insights" in market_position:
            insights = market_position["competitive_insights"]
            assert isinstance(insights, list)

    @pytest.mark.asyncio
    async def test_assess_competitive_advantages(self, peer_analyzer, sample_company_data, sample_peer_data):
        """Competitive assessment lists advantages, weaknesses, recommendations."""
        competitive_analysis = await peer_analyzer.assess_competitive_advantages(
            company_data=sample_company_data,
            peer_data=sample_peer_data
        )
        assert competitive_analysis is not None
        assert isinstance(competitive_analysis, dict)
        # Should contain competitive analysis
        assert "competitive_advantages" in competitive_analysis
        assert "competitive_weaknesses" in competitive_analysis
        assert "strategic_recommendations" in competitive_analysis
        # Advantages and weaknesses are both lists (possibly empty)
        advantages = competitive_analysis["competitive_advantages"]
        weaknesses = competitive_analysis["competitive_weaknesses"]
        assert isinstance(advantages, list)
        assert isinstance(weaknesses, list)

    @pytest.mark.asyncio
    async def test_generate_peer_insights(self, peer_analyzer, sample_company_data, sample_peer_data):
        """Insight generation produces at least one key insight from results."""
        insights = await peer_analyzer.generate_peer_insights(
            company_data=sample_company_data,
            peer_data=sample_peer_data,
            comparison_results={
                "financial_comparison": {"operating_margin": {"percentile": 35}},
                "valuation_comparison": {"per": {"relative_premium_discount": -15.2}}
            }
        )
        assert insights is not None
        assert isinstance(insights, dict)
        # Should contain insights
        assert "key_insights" in insights
        assert "investment_implications" in insights
        # Insights must be a non-empty list
        key_insights = insights["key_insights"]
        assert isinstance(key_insights, list)
        assert len(key_insights) > 0

    @pytest.mark.asyncio
    async def test_benchmark_against_sector(self, peer_analyzer, sample_company_data):
        """Sector benchmarking reports sector averages and percentiles."""
        sector_benchmark = await peer_analyzer.benchmark_against_sector(
            company_data=sample_company_data,
            sector="μ 보기μ ",
            benchmark_metrics=["roe", "operating_margin", "debt_to_equity"]
        )
        assert sector_benchmark is not None
        assert isinstance(sector_benchmark, dict)
        # Should contain sector benchmark
        assert "sector_benchmark" in sector_benchmark
        assert "sector_statistics" in sector_benchmark
        # Metrics may be absent; validate structure only when present
        benchmark = sector_benchmark["sector_benchmark"]
        for metric in ["roe", "operating_margin"]:
            if metric in benchmark:
                assert "company_value" in benchmark[metric]
                assert "sector_average" in benchmark[metric]
                assert "sector_percentile" in benchmark[metric]

    @pytest.mark.asyncio
    async def test_comprehensive_peer_analysis(self, peer_analyzer, sample_company_data):
        """End-to-end analysis returns the expected (optional) sections."""
        comprehensive = await peer_analyzer.comprehensive_peer_analysis(
            company_code="005930",
            include_valuation=True,
            include_growth_comparison=True,
            max_peers=8
        )
        assert comprehensive is not None
        assert isinstance(comprehensive, dict)
        # Sections are optional, but any present section must be non-None
        expected_sections = [
            "peer_overview",
            "financial_comparison",
            "valuation_analysis",
            "market_position",
            "strategic_insights"
        ]
        for section in expected_sections:
            if section in comprehensive:
                assert comprehensive[section] is not None

    @pytest.mark.asyncio
    async def test_calculate_peer_similarity(self, peer_analyzer, sample_company_data):
        """Similarity scoring returns component scores and a [0, 1] overall score."""
        candidate_peer = {
            "company_code": "000660",
            "industry": "λ°λ체",
            "market_cap": 95000000000000,
            "revenue": 55754000000000
        }
        similarity = await peer_analyzer.calculate_peer_similarity(
            company_data=sample_company_data,
            candidate_peer=candidate_peer
        )
        assert similarity is not None
        assert isinstance(similarity, dict)
        # Should contain similarity metrics
        assert "overall_similarity" in similarity
        assert "industry_match" in similarity
        assert "size_similarity" in similarity
        # Overall similarity should be a normalized score in [0, 1]
        overall_score = similarity["overall_similarity"]
        assert 0 <= overall_score <= 1

    @pytest.mark.asyncio
    async def test_error_handling_insufficient_data(self, peer_analyzer):
        """Comparison raises InsufficientDataError when metrics are missing."""
        insufficient_data = {
            "company_code": "005930",
            "company_name": "μΌμ±μ μ"
            # Missing financial metrics
        }
        with pytest.raises(InsufficientDataError):
            await peer_analyzer.compare_financial_metrics(
                company_data=insufficient_data,
                peer_data=[],
                metrics=["revenue", "operating_margin"]
            )

    @pytest.mark.asyncio
    async def test_error_handling_invalid_parameters(self, peer_analyzer, sample_company_data):
        """Peer identification rejects unknown methods and non-positive limits."""
        # Unknown selection method must raise ValueError
        with pytest.raises(ValueError):
            await peer_analyzer.identify_peer_companies(
                company_data=sample_company_data,
                selection_method="invalid_method",
                max_peers=5
            )
        # Negative max_peers must raise ValueError
        with pytest.raises(ValueError):
            await peer_analyzer.identify_peer_companies(
                company_data=sample_company_data,
                selection_method="industry_focused",
                max_peers=-1
            )