"""Tests for the multi-timeframe analyzer (test_multi_timeframe_analyzer.py)."""
import pytest
from datetime import datetime, timedelta
from src.analytics.multi_timeframe_analyzer import MultiTimeframeAnalyzer
from src.exceptions import TimeframeError, AnalysisError, InsufficientDataError
class TestMultiTimeframeAnalyzer:
    """Test suite for the multi-timeframe analyzer."""
@pytest.fixture
def analyzer_config(self):
"""๋ถ์๊ธฐ ์ค์ """
return {
"timeframes": ["1m", "5m", "15m", "1h", "4h", "1d"],
"aggregation_methods": {
"volume": "sum",
"close": "last",
"high": "max",
"low": "min",
"open": "first"
},
"analysis_types": [
"trend_alignment",
"momentum_divergence",
"volume_profile",
"support_resistance"
],
"correlation_thresholds": {
"strong": 0.8,
"moderate": 0.6,
"weak": 0.3
}
}
@pytest.fixture
def multi_timeframe_analyzer(self, analyzer_config):
"""๋ฉํฐ ํ์ํ๋ ์ ๋ถ์๊ธฐ ์ธ์คํด์ค"""
return MultiTimeframeAnalyzer(analyzer_config)
@pytest.fixture
def sample_ohlcv_data(self):
"""์ํ OHLCV ๋ฐ์ดํฐ (1๋ถ๋ด)"""
data = []
base_price = 50000
base_time = datetime(2024, 1, 1, 0, 0, 0)
for i in range(1440): # 24์๊ฐ = 1440๋ถ
time = base_time + timedelta(minutes=i)
# ๊ฐ๊ฒฉ ๋ณ๋ ์๋ฎฌ๋ ์ด์
trend = (i // 60) * 100 # ์๊ฐ๋ณ ํธ๋ ๋
wave = 500 * ((i % 60) / 60) # ๋ถ๋ณ ๋ณ๋
noise = (i % 5) * 20 # ๋
ธ์ด์ฆ
close = base_price + trend + wave + noise
high = close + abs(noise) * 2
low = close - abs(noise) * 1.5
open_price = close - noise
volume = 1000000 + (i % 30) * 50000
data.append({
"timestamp": time.isoformat(),
"open": open_price,
"high": high,
"low": low,
"close": close,
"volume": volume
})
return data
@pytest.fixture
def multi_symbol_data(self):
"""์ฌ๋ฌ ์ฌ๋ณผ์ ๋ฐ์ดํฐ"""
symbols = ["BTC/USD", "ETH/USD", "SOL/USD"]
data = {}
for symbol in symbols:
base_price = 50000 if "BTC" in symbol else 3000 if "ETH" in symbol else 100
symbol_data = []
for i in range(100):
symbol_data.append({
"timestamp": (datetime(2024, 1, 1, 0, 0, 0) + timedelta(hours=i)).isoformat(),
"symbol": symbol,
"close": base_price + i * 10,
"volume": 1000000 + i * 10000
})
data[symbol] = symbol_data
return data
@pytest.mark.asyncio
async def test_timeframe_aggregation(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ์ํ๋ ์ ์ง๊ณ ํ
์คํธ"""
# 5๋ถ๋ด์ผ๋ก ์ง๊ณ
aggregated_5m = await multi_timeframe_analyzer.aggregate_to_timeframe(
sample_ohlcv_data,
target_timeframe="5m"
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert len(aggregated_5m) == 288 # 24์๊ฐ = 288๊ฐ 5๋ถ๋ด
# ์ฒซ ๋ฒ์งธ 5๋ถ๋ด ๊ฒ์ฆ
first_candle = aggregated_5m[0]
assert "open" in first_candle
assert "high" in first_candle
assert "low" in first_candle
assert "close" in first_candle
assert "volume" in first_candle
# ์ง๊ณ ๊ท์น ๊ฒ์ฆ
# open์ ์ฒซ ๋ฒ์งธ ๊ฐ
assert first_candle["open"] == sample_ohlcv_data[0]["open"]
# close๋ ๋ง์ง๋ง ๊ฐ
assert first_candle["close"] == sample_ohlcv_data[4]["close"]
# high๋ ์ต๋๊ฐ
max_high = max(d["high"] for d in sample_ohlcv_data[:5])
assert first_candle["high"] == max_high
# low๋ ์ต์๊ฐ
min_low = min(d["low"] for d in sample_ohlcv_data[:5])
assert first_candle["low"] == min_low
# volume์ ํฉ๊ณ
sum_volume = sum(d["volume"] for d in sample_ohlcv_data[:5])
assert first_candle["volume"] == sum_volume
@pytest.mark.asyncio
async def test_multi_timeframe_analysis(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""๋ฉํฐ ํ์ํ๋ ์ ๋ถ์ ํ
์คํธ"""
# ์ฌ๋ฌ ํ์ํ๋ ์ ๋ถ์
analysis_results = await multi_timeframe_analyzer.analyze_timeframes(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"]
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "timeframe_data" in analysis_results
assert "trend_alignment" in analysis_results
assert "momentum_analysis" in analysis_results
assert "volume_profile" in analysis_results
# ํ์ํ๋ ์๋ณ ๋ฐ์ดํฐ ํ์ธ
tf_data = analysis_results["timeframe_data"]
assert "5m" in tf_data
assert "15m" in tf_data
assert "1h" in tf_data
# ํธ๋ ๋ ์ ๋ ฌ ํ์ธ
trend_alignment = analysis_results["trend_alignment"]
assert "alignment_score" in trend_alignment
assert "timeframe_trends" in trend_alignment
assert 0 <= trend_alignment["alignment_score"] <= 1
@pytest.mark.asyncio
async def test_trend_alignment_detection(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํธ๋ ๋ ์ ๋ ฌ ๊ฐ์ง ํ
์คํธ"""
# ํธ๋ ๋ ์ ๋ ฌ ๋ถ์
trend_alignment = await multi_timeframe_analyzer.detect_trend_alignment(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
lookback_periods={"5m": 20, "15m": 20, "1h": 20}
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "aligned" in trend_alignment
assert "direction" in trend_alignment
assert "strength" in trend_alignment
assert "timeframe_trends" in trend_alignment
# ํ์ํ๋ ์๋ณ ํธ๋ ๋
tf_trends = trend_alignment["timeframe_trends"]
for tf in ["5m", "15m", "1h"]:
assert tf in tf_trends
assert "direction" in tf_trends[tf]
assert "strength" in tf_trends[tf]
assert tf_trends[tf]["direction"] in ["up", "down", "neutral"]
@pytest.mark.asyncio
async def test_momentum_divergence_analysis(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""๋ชจ๋ฉํ
๋ค์ด๋ฒ์ ์ค ๋ถ์ ํ
์คํธ"""
# ๋ชจ๋ฉํ
๋ค์ด๋ฒ์ ์ค ๋ถ์
divergence_results = await multi_timeframe_analyzer.analyze_momentum_divergence(
sample_ohlcv_data,
timeframes=["5m", "15m"],
momentum_indicator="rsi"
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "divergences" in divergence_results
assert "momentum_values" in divergence_results
assert "divergence_strength" in divergence_results
# ๋ค์ด๋ฒ์ ์ค ๊ฒ์ฆ
divergences = divergence_results["divergences"]
for div in divergences:
assert "type" in div # bullish_divergence, bearish_divergence
assert "timeframe" in div
assert "index" in div
assert "strength" in div
@pytest.mark.asyncio
async def test_volume_profile_analysis(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""๋ณผ๋ฅจ ํ๋กํ์ผ ๋ถ์ ํ
์คํธ"""
# ๋ณผ๋ฅจ ํ๋กํ์ผ ๋ถ์
volume_profile = await multi_timeframe_analyzer.analyze_volume_profile(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
profile_bins=20
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "price_levels" in volume_profile
assert "volume_distribution" in volume_profile
assert "poc" in volume_profile # Point of Control
assert "value_area" in volume_profile
# POC ๊ฒ์ฆ
poc = volume_profile["poc"]
assert "price" in poc
assert "volume" in poc
# ๊ฐ์น ์์ญ ๊ฒ์ฆ
value_area = volume_profile["value_area"]
assert "high" in value_area
assert "low" in value_area
assert "volume_percentage" in value_area
assert 0 <= value_area["volume_percentage"] <= 1
@pytest.mark.asyncio
async def test_support_resistance_across_timeframes(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ์ํ๋ ์๋ณ ์ง์ง/์ ํญ ๋ถ์ ํ
์คํธ"""
# ์ง์ง/์ ํญ ๋ ๋ฒจ ๋ถ์
sr_levels = await multi_timeframe_analyzer.find_support_resistance_levels(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
min_touches=2
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "levels" in sr_levels
assert "timeframe_levels" in sr_levels
assert "confluence_zones" in sr_levels
# ๋ ๋ฒจ ๊ฒ์ฆ
levels = sr_levels["levels"]
assert len(levels) > 0
for level in levels:
assert "price" in level
assert "type" in level # support or resistance
assert "strength" in level
assert "timeframes" in level
assert len(level["timeframes"]) > 0
@pytest.mark.asyncio
async def test_correlation_matrix_generation(self, multi_timeframe_analyzer, multi_symbol_data):
"""์๊ด๊ด๊ณ ๋งคํธ๋ฆญ์ค ์์ฑ ํ
์คํธ"""
# ์๊ด๊ด๊ณ ๋งคํธ๋ฆญ์ค ๊ณ์ฐ
correlation_matrix = await multi_timeframe_analyzer.calculate_correlation_matrix(
multi_symbol_data,
timeframe="1h",
correlation_window=24
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "matrix" in correlation_matrix
assert "symbols" in correlation_matrix
assert "significant_correlations" in correlation_matrix
# ๋งคํธ๋ฆญ์ค ๊ฒ์ฆ
matrix = correlation_matrix["matrix"]
symbols = correlation_matrix["symbols"]
assert len(matrix) == len(symbols)
assert all(len(row) == len(symbols) for row in matrix)
# ๋๊ฐ์ ์ 1์ด์ด์ผ ํจ
for i in range(len(symbols)):
assert matrix[i][i] == 1.0
# ์๊ด๊ด๊ณ๋ -1๊ณผ 1 ์ฌ์ด
for row in matrix:
for corr in row:
assert -1 <= corr <= 1
@pytest.mark.asyncio
async def test_timeframe_strength_ranking(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ์ํ๋ ์ ๊ฐ๋ ์์ ํ
์คํธ"""
# ํ์ํ๋ ์๋ณ ๊ฐ๋ ํ๊ฐ
strength_ranking = await multi_timeframe_analyzer.rank_timeframe_strength(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
criteria=["trend", "momentum", "volume"]
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "rankings" in strength_ranking
assert "scores" in strength_ranking
assert "strongest_timeframe" in strength_ranking
# ์์ ๊ฒ์ฆ
rankings = strength_ranking["rankings"]
assert len(rankings) == 3
for rank in rankings:
assert "timeframe" in rank
assert "score" in rank
assert "breakdown" in rank
assert 0 <= rank["score"] <= 1
@pytest.mark.asyncio
async def test_market_regime_identification(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""์์ฅ ์ฒด์ ์๋ณ ํ
์คํธ"""
# ์์ฅ ์ฒด์ ๋ถ์
market_regime = await multi_timeframe_analyzer.identify_market_regime(
sample_ohlcv_data,
timeframes=["15m", "1h", "4h"]
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "regime" in market_regime
assert "confidence" in market_regime
assert "characteristics" in market_regime
assert "timeframe_regimes" in market_regime
# ์ฒด์ ํ์
๊ฒ์ฆ
assert market_regime["regime"] in ["trending", "ranging", "volatile", "quiet"]
assert 0 <= market_regime["confidence"] <= 1
# ํน์ฑ ๊ฒ์ฆ
characteristics = market_regime["characteristics"]
assert "volatility" in characteristics
assert "trend_strength" in characteristics
assert "volume_profile" in characteristics
@pytest.mark.asyncio
async def test_divergence_confluence_detection(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""๋ค์ด๋ฒ์ ์ค ์ปจํ๋ฃจ์ธ์ค ๊ฐ์ง ํ
์คํธ"""
# ๋ค์ด๋ฒ์ ์ค ์ปจํ๋ฃจ์ธ์ค ๋ถ์
confluence = await multi_timeframe_analyzer.detect_divergence_confluence(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
indicators=["rsi", "macd"]
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "confluence_zones" in confluence
assert "divergence_count" in confluence
assert "strength_distribution" in confluence
# ์ปจํ๋ฃจ์ธ์ค ์กด ๊ฒ์ฆ
zones = confluence["confluence_zones"]
for zone in zones:
assert "start_index" in zone
assert "end_index" in zone
assert "timeframes_involved" in zone
assert "indicators_involved" in zone
assert "strength" in zone
assert zone["strength"] > 0
@pytest.mark.asyncio
async def test_optimal_timeframe_selection(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""์ต์ ํ์ํ๋ ์ ์ ํ ํ
์คํธ"""
# ์ต์ ํ์ํ๋ ์ ์ ํ
optimal_tf = await multi_timeframe_analyzer.select_optimal_timeframe(
sample_ohlcv_data,
trading_style="scalping", # scalping, day_trading, swing_trading
market_conditions={"volatility": "high", "trend": "strong"}
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "recommended_timeframe" in optimal_tf
assert "reasoning" in optimal_tf
assert "alternative_timeframes" in optimal_tf
assert "confidence_score" in optimal_tf
# ์ถ์ฒ ํ์ํ๋ ์ ๊ฒ์ฆ
assert optimal_tf["recommended_timeframe"] in ["1m", "5m", "15m", "1h", "4h", "1d"]
assert 0 <= optimal_tf["confidence_score"] <= 1
# ๋์ ํ์ํ๋ ์ ๊ฒ์ฆ
alternatives = optimal_tf["alternative_timeframes"]
assert len(alternatives) > 0
for alt in alternatives:
assert "timeframe" in alt
assert "score" in alt
@pytest.mark.asyncio
async def test_fractal_analysis(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ๋ํ ๋ถ์ ํ
์คํธ"""
# ํ๋ํ ํจํด ๋ถ์
fractal_analysis = await multi_timeframe_analyzer.analyze_fractals(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
fractal_period=5
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "fractals" in fractal_analysis
assert "fractal_dimensions" in fractal_analysis
assert "self_similarity_score" in fractal_analysis
# ํ๋ํ ๊ฒ์ฆ
fractals = fractal_analysis["fractals"]
for tf, tf_fractals in fractals.items():
for fractal in tf_fractals:
assert "type" in fractal # up or down
assert "index" in fractal
assert "price" in fractal
assert "strength" in fractal
@pytest.mark.asyncio
async def test_timeframe_transition_analysis(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ์ํ๋ ์ ์ ํ ๋ถ์ ํ
์คํธ"""
# ํ์ํ๋ ์ ์ ํ์ ๋ถ์
transition_analysis = await multi_timeframe_analyzer.analyze_timeframe_transitions(
sample_ohlcv_data,
from_timeframe="5m",
to_timeframe="15m"
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "transition_points" in transition_analysis
assert "signal_quality" in transition_analysis
assert "confirmation_rate" in transition_analysis
# ์ ํ์ ๊ฒ์ฆ
transitions = transition_analysis["transition_points"]
for point in transitions:
assert "index_5m" in point
assert "index_15m" in point
assert "signal_type" in point
assert "confirmed" in point
assert isinstance(point["confirmed"], bool)
@pytest.mark.asyncio
async def test_volatility_across_timeframes(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""ํ์ํ๋ ์๋ณ ๋ณ๋์ฑ ๋ถ์ ํ
์คํธ"""
# ๋ณ๋์ฑ ๋ถ์
volatility_analysis = await multi_timeframe_analyzer.analyze_volatility_profile(
sample_ohlcv_data,
timeframes=["5m", "15m", "1h"],
volatility_window=20
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "volatility_by_timeframe" in volatility_analysis
assert "volatility_ratio" in volatility_analysis
assert "expansion_contraction" in volatility_analysis
# ํ์ํ๋ ์๋ณ ๋ณ๋์ฑ
vol_by_tf = volatility_analysis["volatility_by_timeframe"]
for tf in ["5m", "15m", "1h"]:
assert tf in vol_by_tf
assert "average" in vol_by_tf[tf]
assert "current" in vol_by_tf[tf]
assert "percentile" in vol_by_tf[tf]
assert 0 <= vol_by_tf[tf]["percentile"] <= 100
@pytest.mark.asyncio
async def test_entry_exit_optimization(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""์ง์
/์ฒญ์ฐ ์ต์ ํ ํ
์คํธ"""
# ์ง์
/์ฒญ์ฐ ํฌ์ธํธ ์ต์ ํ
optimization_results = await multi_timeframe_analyzer.optimize_entry_exit_points(
sample_ohlcv_data,
primary_timeframe="15m",
confirmation_timeframes=["5m", "1h"],
strategy_type="momentum"
)
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "entry_points" in optimization_results
assert "exit_points" in optimization_results
assert "risk_reward_ratio" in optimization_results
assert "win_rate_estimate" in optimization_results
# ์ง์
์ ๊ฒ์ฆ
entry_points = optimization_results["entry_points"]
for entry in entry_points:
assert "index" in entry
assert "price" in entry
assert "confidence" in entry
assert "confirmations" in entry
assert len(entry["confirmations"]) > 0
@pytest.mark.asyncio
async def test_data_synchronization(self, multi_timeframe_analyzer):
"""๋ฐ์ดํฐ ๋๊ธฐํ ํ
์คํธ"""
# ๋น๋๊ธฐ ๋ฐ์ดํฐ ์์ฑ
data_1m = []
data_5m = []
base_time = datetime(2024, 1, 1, 0, 0, 0)
# 1๋ถ ๋ฐ์ดํฐ (์ ํํ ์๊ฐ)
for i in range(60):
data_1m.append({
"timestamp": (base_time + timedelta(minutes=i)).isoformat(),
"close": 50000 + i * 10
})
# 5๋ถ ๋ฐ์ดํฐ (์ฝ๊ฐ ์ด๊ธ๋ ์๊ฐ)
for i in range(12):
# 2์ด ๋ฆ์ ํ์์คํฌํ
timestamp = base_time + timedelta(minutes=i*5, seconds=2)
data_5m.append({
"timestamp": timestamp.isoformat(),
"close": 50000 + i * 50
})
# ๋ฐ์ดํฐ ๋๊ธฐํ
synced_data = await multi_timeframe_analyzer.synchronize_timeframe_data({
"1m": data_1m,
"5m": data_5m
})
# ๊ฒฐ๊ณผ ๊ฒ์ฆ
assert "synchronized_timestamps" in synced_data
assert "aligned_data" in synced_data
assert "alignment_quality" in synced_data
# ์ ๋ ฌ ํ์ง ํ์ธ
assert synced_data["alignment_quality"] > 0.8
@pytest.mark.asyncio
async def test_error_handling(self, multi_timeframe_analyzer):
"""์ค๋ฅ ์ฒ๋ฆฌ ํ
์คํธ"""
# ์๋ชป๋ ํ์ํ๋ ์
with pytest.raises(TimeframeError):
await multi_timeframe_analyzer.aggregate_to_timeframe(
[],
target_timeframe="invalid"
)
# ๋ฐ์ดํฐ ๋ถ์กฑ
with pytest.raises(InsufficientDataError):
await multi_timeframe_analyzer.analyze_timeframes(
[{"timestamp": "2024-01-01", "close": 100}], # 1๊ฐ ๋ฐ์ดํฐ
timeframes=["1h", "4h"]
)
# ๋ถ์ ์ค๋ฅ
with pytest.raises(AnalysisError):
await multi_timeframe_analyzer.detect_trend_alignment(
None, # None ๋ฐ์ดํฐ
timeframes=["5m"]
)
@pytest.mark.asyncio
async def test_performance_metrics(self, multi_timeframe_analyzer, sample_ohlcv_data):
"""์ฑ๋ฅ ๋ฉํธ๋ฆญ ํ
์คํธ"""
# ์ฌ๋ฌ ๋ถ์ ์ํ
await multi_timeframe_analyzer.analyze_timeframes(
sample_ohlcv_data,
timeframes=["5m", "15m"]
)
await multi_timeframe_analyzer.detect_trend_alignment(
sample_ohlcv_data,
timeframes=["5m", "15m"]
)
# ์ฑ๋ฅ ๋ฉํธ๋ฆญ ํ์ธ
metrics = multi_timeframe_analyzer.get_performance_metrics()
assert "analysis_count" in metrics
assert "average_processing_time" in metrics
assert "cache_hit_rate" in metrics
assert metrics["analysis_count"] > 0
assert metrics["average_processing_time"] > 0
assert 0 <= metrics["cache_hit_rate"] <= 1