Skip to main content
Glama

MCP Stock Details Server

by whdghk1907
test_analyst_consensus_analyzer.py • 14.8 kB
""" Test cases for AnalystConsensusAnalyzer class (TDD Red Phase) """ import pytest from unittest.mock import AsyncMock, patch from typing import Dict, Any # Mock the module before importing import sys from unittest.mock import MagicMock sys.modules['src.tools.analyst_consensus_tools'] = MagicMock() from src.tools.analyst_consensus_tools import AnalystConsensusAnalyzer from src.exceptions import InsufficientDataError class TestAnalystConsensusAnalyzer: """Test cases for AnalystConsensusAnalyzer""" @pytest.fixture def consensus_analyzer(self): """Create AnalystConsensusAnalyzer instance""" from src.tools.analyst_consensus_tools import AnalystConsensusAnalyzer return AnalystConsensusAnalyzer() @pytest.fixture def sample_consensus_data(self): """Sample consensus data for testing""" return { "company_code": "005930", "company_name": "μ‚Όμ„±μ „μž", "current_price": 75000, "analyst_opinions": [ { "analyst_id": "A001", "firm": "μ‚Όμ„±μ¦κΆŒ", "analyst_name": "김동원", "rating": "Buy", "target_price": 85000, "date": "2024-01-15", "eps_estimate_cy": 4850, "eps_estimate_ny": 5200 }, { "analyst_id": "A002", "firm": "λ―Έλž˜μ—μ…‹μ¦κΆŒ", "analyst_name": "λ°•μ„œμ—°", "rating": "Hold", "target_price": 78000, "date": "2024-01-12", "eps_estimate_cy": 4700, "eps_estimate_ny": 5100 }, { "analyst_id": "A003", "firm": "KB증ꢌ", "analyst_name": "이민수", "rating": "Buy", "target_price": 90000, "date": "2024-01-10", "eps_estimate_cy": 5000, "eps_estimate_ny": 5300 } ] } @pytest.fixture def sample_revision_data(self): """Sample revision data for testing""" return [ { "analyst_id": "A001", "revision_date": "2024-01-15", "revision_type": "upgrade", "old_rating": "Hold", "new_rating": "Buy", "old_target": 80000, "new_target": 85000, "revision_reason": "Improved semiconductor outlook" }, { "analyst_id": "A004", "revision_date": "2024-01-08", "revision_type": "eps_revision", "old_eps_cy": 4600, "new_eps_cy": 4850, "revision_direction": "up" } ] @pytest.mark.asyncio async def 
test_calculate_target_price_consensus(self, consensus_analyzer, sample_consensus_data): """Test target price consensus calculation""" consensus = await consensus_analyzer.calculate_target_price_consensus( consensus_data=sample_consensus_data ) assert consensus is not None assert isinstance(consensus, dict) # Should contain target price statistics assert "mean_target_price" in consensus assert "median_target_price" in consensus assert "high_target" in consensus assert "low_target" in consensus assert "price_upside_downside" in consensus assert "analysts_count" in consensus # Check calculations assert consensus["analysts_count"] == 3 assert consensus["mean_target_price"] == 84333.33 # (85000+78000+90000)/3 assert consensus["median_target_price"] == 85000 assert consensus["high_target"] == 90000 assert consensus["low_target"] == 78000 # Price upside should be calculated correctly upside = consensus["price_upside_downside"] expected_upside = (84333.33 - 75000) / 75000 * 100 assert abs(upside - expected_upside) < 0.1 @pytest.mark.asyncio async def test_analyze_investment_opinions(self, consensus_analyzer, sample_consensus_data): """Test investment opinions analysis""" opinion_analysis = await consensus_analyzer.analyze_investment_opinions( consensus_data=sample_consensus_data ) assert opinion_analysis is not None assert isinstance(opinion_analysis, dict) # Should contain opinion distribution assert "opinion_distribution" in opinion_analysis assert "consensus_rating" in opinion_analysis assert "consensus_strength" in opinion_analysis # Check opinion counts distribution = opinion_analysis["opinion_distribution"] assert distribution["buy"] == 2 # 2 Buy ratings assert distribution["hold"] == 1 # 1 Hold rating assert distribution["sell"] == 0 # 0 Sell ratings assert distribution["total"] == 3 # Check percentages assert distribution["buy_percentage"] == 66.7 # 2/3 assert distribution["hold_percentage"] == 33.3 # 1/3 @pytest.mark.asyncio async def 
test_calculate_earnings_estimates(self, consensus_analyzer, sample_consensus_data): """Test earnings estimates calculation""" estimates = await consensus_analyzer.calculate_earnings_estimates( consensus_data=sample_consensus_data ) assert estimates is not None assert isinstance(estimates, dict) # Should contain current and next year estimates assert "current_year" in estimates assert "next_year" in estimates assert "estimate_dispersion" in estimates # Check current year estimates cy_estimates = estimates["current_year"] assert "mean_eps" in cy_estimates assert "median_eps" in cy_estimates assert "high_estimate" in cy_estimates assert "low_estimate" in cy_estimates # Verify calculations expected_mean_eps = (4850 + 4700 + 5000) / 3 assert abs(cy_estimates["mean_eps"] - expected_mean_eps) < 0.1 @pytest.mark.asyncio async def test_track_analyst_revisions(self, consensus_analyzer, sample_revision_data): """Test analyst revisions tracking""" revisions = await consensus_analyzer.track_analyst_revisions( revision_data=sample_revision_data, tracking_period="3M" ) assert revisions is not None assert isinstance(revisions, dict) # Should contain revision summary assert "revision_summary" in revisions assert "rating_changes" in revisions assert "eps_revisions" in revisions # Check revision counts summary = revisions["revision_summary"] assert "total_revisions" in summary assert "upgrades" in summary assert "downgrades" in summary assert "eps_revisions_up" in summary assert "eps_revisions_down" in summary @pytest.mark.asyncio async def test_assess_consensus_quality(self, consensus_analyzer, sample_consensus_data): """Test consensus quality assessment""" quality_assessment = await consensus_analyzer.assess_consensus_quality( consensus_data=sample_consensus_data ) assert quality_assessment is not None assert isinstance(quality_assessment, dict) # Should contain quality metrics assert "coverage_quality" in quality_assessment assert "estimate_dispersion" in quality_assessment assert 
"recency_score" in quality_assessment assert "firm_diversity" in quality_assessment # Check quality scores coverage = quality_assessment["coverage_quality"] assert "analyst_count" in coverage assert "tier1_coverage" in coverage assert "coverage_score" in coverage @pytest.mark.asyncio async def test_generate_consensus_insights(self, consensus_analyzer, sample_consensus_data): """Test consensus insights generation""" insights = await consensus_analyzer.generate_consensus_insights( consensus_data=sample_consensus_data, target_price_consensus={"mean_target_price": 84333, "price_upside_downside": 12.4}, opinion_analysis={"buy_percentage": 66.7, "consensus_rating": "Buy"} ) assert insights is not None assert isinstance(insights, dict) # Should contain insights assert "key_insights" in insights assert "investment_implications" in insights assert "consensus_strength" in insights # Check insights content key_insights = insights["key_insights"] assert isinstance(key_insights, list) assert len(key_insights) > 0 @pytest.mark.asyncio async def test_analyze_earnings_surprises(self, consensus_analyzer): """Test earnings surprise analysis""" surprise_data = [ { "quarter": "2023Q4", "estimated_eps": 4200, "actual_eps": 4500, "report_date": "2024-01-25" }, { "quarter": "2023Q3", "estimated_eps": 3800, "actual_eps": 3900, "report_date": "2023-10-26" } ] surprise_analysis = await consensus_analyzer.analyze_earnings_surprises( surprise_data=surprise_data, periods=8 ) assert surprise_analysis is not None assert isinstance(surprise_analysis, dict) # Should contain surprise metrics assert "surprise_history" in surprise_analysis assert "surprise_statistics" in surprise_analysis # Check surprise calculations history = surprise_analysis["surprise_history"] assert len(history) == 2 # Verify surprise percentage calculations q4_surprise = next(s for s in history if s["quarter"] == "2023Q4") expected_surprise = (4500 - 4200) / 4200 * 100 assert abs(q4_surprise["surprise_percent"] - 
expected_surprise) < 0.1 @pytest.mark.asyncio async def test_benchmark_consensus_accuracy(self, consensus_analyzer): """Test consensus accuracy benchmarking""" historical_data = [ { "period": "2023", "consensus_target": 80000, "actual_price_range": {"high": 85000, "low": 72000, "year_end": 75000}, "consensus_eps": 4500, "actual_eps": 4200 } ] accuracy = await consensus_analyzer.benchmark_consensus_accuracy( historical_data=historical_data ) assert accuracy is not None assert isinstance(accuracy, dict) # Should contain accuracy metrics assert "target_price_accuracy" in accuracy assert "eps_accuracy" in accuracy assert "overall_accuracy_score" in accuracy @pytest.mark.asyncio async def test_comprehensive_consensus_analysis(self, consensus_analyzer): """Test comprehensive consensus analysis""" comprehensive = await consensus_analyzer.comprehensive_consensus_analysis( company_code="005930", include_revisions=True, include_surprise_history=True, include_accuracy_metrics=True ) assert comprehensive is not None assert isinstance(comprehensive, dict) # Should contain comprehensive analysis sections expected_sections = [ "consensus_overview", "target_price_analysis", "opinion_analysis", "earnings_estimates", "consensus_insights" ] for section in expected_sections: if section in comprehensive: assert comprehensive[section] is not None @pytest.mark.asyncio async def test_compare_consensus_changes(self, consensus_analyzer): """Test consensus changes comparison""" current_consensus = { "mean_target_price": 84333, "buy_percentage": 66.7, "mean_eps_cy": 4850 } previous_consensus = { "mean_target_price": 82000, "buy_percentage": 60.0, "mean_eps_cy": 4600 } changes = await consensus_analyzer.compare_consensus_changes( current_consensus=current_consensus, previous_consensus=previous_consensus, comparison_period="1M" ) assert changes is not None assert isinstance(changes, dict) # Should contain change metrics assert "target_price_change" in changes assert "opinion_change" in changes 
assert "estimate_changes" in changes # Check calculations tp_change = changes["target_price_change"] expected_change = (84333 - 82000) / 82000 * 100 assert abs(tp_change["percent_change"] - expected_change) < 0.1 @pytest.mark.asyncio async def test_error_handling_insufficient_data(self, consensus_analyzer): """Test error handling with insufficient data""" insufficient_data = { "company_code": "005930", "analyst_opinions": [] # Empty opinions } with pytest.raises(InsufficientDataError): await consensus_analyzer.calculate_target_price_consensus( consensus_data=insufficient_data ) @pytest.mark.asyncio async def test_error_handling_invalid_parameters(self, consensus_analyzer, sample_consensus_data): """Test error handling with invalid parameters""" # Test with invalid tracking period with pytest.raises(ValueError): await consensus_analyzer.track_analyst_revisions( revision_data=[], tracking_period="invalid_period" ) # Test with negative periods with pytest.raises(ValueError): await consensus_analyzer.analyze_earnings_surprises( surprise_data=[], periods=-1 )

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/whdghk1907/mcp-stock-details'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.