# tests/unit/test_voting_system.py
# feat: Implements complete test structure with pytest (commit b9c68d4)
"""Unit tests for VotingStrategy module."""
import pytest
from unittest.mock import Mock, patch, MagicMock
import numpy as np
from datetime import datetime
# Import the module under test
try:
from src.ai.voting_system import VotingStrategy
except ImportError:
pytest.skip("VotingStrategy not available", allow_module_level=True)
class TestVotingStrategy:
    """Test cases for VotingStrategy."""
@pytest.fixture
def voting_strategy(self):
"""Create VotingStrategy instance for testing."""
return VotingStrategy()
@pytest.fixture
def sample_predictions(self):
"""Sample predictions from different models."""
return [
{'model': 'model_a', 'prediction': 'buy', 'confidence': 0.8},
{'model': 'model_b', 'prediction': 'sell', 'confidence': 0.6},
{'model': 'model_c', 'prediction': 'buy', 'confidence': 0.9},
{'model': 'model_d', 'prediction': 'hold', 'confidence': 0.7}
]
@pytest.fixture
def weighted_models(self):
"""Sample model weights for weighted voting."""
return {
'model_a': 0.3,
'model_b': 0.2,
'model_c': 0.4,
'model_d': 0.1
}
def test_voting_strategy_initialization(self, voting_strategy):
"""Test VotingStrategy initialization."""
assert voting_strategy is not None
assert hasattr(voting_strategy, 'vote')
assert hasattr(voting_strategy, 'majority_vote')
def test_majority_vote_clear_winner(self, voting_strategy):
"""Test majority voting with clear winner."""
predictions = [
{'prediction': 'buy', 'confidence': 0.8},
{'prediction': 'buy', 'confidence': 0.7},
{'prediction': 'buy', 'confidence': 0.9},
{'prediction': 'sell', 'confidence': 0.6}
]
result = voting_strategy.majority_vote(predictions)
assert result['decision'] == 'buy'
assert 'confidence' in result
assert result['confidence'] > 0
def test_majority_vote_tie(self, voting_strategy):
"""Test majority voting with tie scenario."""
predictions = [
{'prediction': 'buy', 'confidence': 0.8},
{'prediction': 'sell', 'confidence': 0.7},
{'prediction': 'buy', 'confidence': 0.6},
{'prediction': 'sell', 'confidence': 0.9}
]
result = voting_strategy.majority_vote(predictions)
# Should handle tie appropriately
assert 'decision' in result
assert result['decision'] in ['buy', 'sell', 'hold']
def test_weighted_vote(self, voting_strategy, sample_predictions, weighted_models):
"""Test weighted voting mechanism."""
result = voting_strategy.weighted_vote(sample_predictions, weighted_models)
assert 'decision' in result
assert 'confidence' in result
assert 'weighted_score' in result
assert result['decision'] in ['buy', 'sell', 'hold']
def test_confidence_weighted_vote(self, voting_strategy, sample_predictions):
"""Test confidence-weighted voting."""
result = voting_strategy.confidence_weighted_vote(sample_predictions)
assert 'decision' in result
assert 'confidence' in result
# Higher confidence predictions should have more influence
assert result['confidence'] > 0
@pytest.mark.parametrize("voting_method", [
'majority',
'weighted',
'confidence_weighted',
'unanimous'
])
def test_different_voting_methods(self, voting_strategy, sample_predictions, voting_method):
"""Test different voting methods."""
if voting_method == 'majority':
result = voting_strategy.majority_vote(sample_predictions)
elif voting_method == 'weighted':
weights = {'model_a': 0.4, 'model_b': 0.3, 'model_c': 0.2, 'model_d': 0.1}
result = voting_strategy.weighted_vote(sample_predictions, weights)
elif voting_method == 'confidence_weighted':
result = voting_strategy.confidence_weighted_vote(sample_predictions)
elif voting_method == 'unanimous':
result = voting_strategy.unanimous_vote(sample_predictions)
assert isinstance(result, dict)
assert 'decision' in result
def test_unanimous_vote_success(self, voting_strategy):
"""Test unanimous voting when all models agree."""
unanimous_predictions = [
{'prediction': 'buy', 'confidence': 0.8},
{'prediction': 'buy', 'confidence': 0.7},
{'prediction': 'buy', 'confidence': 0.9}
]
result = voting_strategy.unanimous_vote(unanimous_predictions)
assert result['decision'] == 'buy'
assert result['unanimous'] is True
def test_unanimous_vote_failure(self, voting_strategy, sample_predictions):
"""Test unanimous voting when models disagree."""
result = voting_strategy.unanimous_vote(sample_predictions)
assert result['unanimous'] is False
assert result['decision'] in ['buy', 'sell', 'hold', 'no_consensus']
def test_empty_predictions(self, voting_strategy):
"""Test voting with empty predictions list."""
empty_predictions = []
with pytest.raises((ValueError, IndexError)):
voting_strategy.majority_vote(empty_predictions)
def test_invalid_prediction_format(self, voting_strategy):
"""Test voting with invalid prediction format."""
invalid_predictions = [
{'invalid_key': 'value'},
{'another_invalid': 'format'}
]
with pytest.raises((KeyError, ValueError)):
voting_strategy.majority_vote(invalid_predictions)
def test_confidence_threshold_filtering(self, voting_strategy):
"""Test filtering predictions by confidence threshold."""
mixed_confidence_predictions = [
{'prediction': 'buy', 'confidence': 0.9}, # High confidence
{'prediction': 'sell', 'confidence': 0.3}, # Low confidence
{'prediction': 'buy', 'confidence': 0.8}, # High confidence
{'prediction': 'hold', 'confidence': 0.2} # Low confidence
]
threshold = 0.5
result = voting_strategy.vote_with_threshold(mixed_confidence_predictions, threshold)
assert 'decision' in result
assert 'filtered_count' in result
# Should only consider high-confidence predictions
assert result['filtered_count'] == 2
def test_model_performance_weighting(self, voting_strategy):
"""Test weighting based on historical model performance."""
model_performance = {
'model_a': 0.85, # 85% accuracy
'model_b': 0.60, # 60% accuracy
'model_c': 0.92, # 92% accuracy
'model_d': 0.70 # 70% accuracy
}
predictions = [
{'model': 'model_a', 'prediction': 'buy', 'confidence': 0.8},
{'model': 'model_b', 'prediction': 'sell', 'confidence': 0.6},
{'model': 'model_c', 'prediction': 'buy', 'confidence': 0.9},
{'model': 'model_d', 'prediction': 'hold', 'confidence': 0.7}
]
result = voting_strategy.performance_weighted_vote(predictions, model_performance)
assert 'decision' in result
assert 'performance_weighted_score' in result
# Model C has highest performance, so 'buy' should be favored
def test_adaptive_voting_strategy(self, voting_strategy):
"""Test adaptive voting that changes strategy based on market conditions."""
market_conditions = {
'volatility': 'high',
'trend': 'bullish',
'volume': 'above_average'
}
predictions = [
{'prediction': 'buy', 'confidence': 0.7},
{'prediction': 'buy', 'confidence': 0.8},
{'prediction': 'sell', 'confidence': 0.6}
]
result = voting_strategy.adaptive_vote(predictions, market_conditions)
assert 'decision' in result
assert 'strategy_used' in result
assert 'market_adjustment' in result
def test_time_decay_weighting(self, voting_strategy):
"""Test time-based decay weighting for predictions."""
from datetime import datetime, timedelta
now = datetime.now()
predictions_with_time = [
{
'prediction': 'buy',
'confidence': 0.8,
'timestamp': now - timedelta(minutes=1) # Recent
},
{
'prediction': 'sell',
'confidence': 0.7,
'timestamp': now - timedelta(hours=1) # Older
},
{
'prediction': 'buy',
'confidence': 0.6,
'timestamp': now - timedelta(minutes=30) # Medium age
}
]
result = voting_strategy.time_weighted_vote(predictions_with_time)
assert 'decision' in result
assert 'time_weighted_score' in result
# Recent predictions should have more weight
def test_ensemble_voting_combination(self, voting_strategy, sample_predictions):
"""Test ensemble voting combining multiple strategies."""
strategies = ['majority', 'confidence_weighted', 'weighted']
weights = {'model_a': 0.3, 'model_b': 0.2, 'model_c': 0.4, 'model_d': 0.1}
result = voting_strategy.ensemble_vote(sample_predictions, strategies, weights)
assert 'decision' in result
assert 'ensemble_confidence' in result
assert 'strategy_results' in result
assert len(result['strategy_results']) == len(strategies)
def test_voting_with_abstention(self, voting_strategy):
"""Test voting mechanism that allows abstention."""
low_confidence_predictions = [
{'prediction': 'buy', 'confidence': 0.4},
{'prediction': 'sell', 'confidence': 0.3},
{'prediction': 'hold', 'confidence': 0.35}
]
min_confidence = 0.6
result = voting_strategy.vote_with_abstention(low_confidence_predictions, min_confidence)
assert result['decision'] == 'abstain'
assert 'reason' in result
def test_consensus_measurement(self, voting_strategy, sample_predictions):
"""Test consensus measurement among predictions."""
consensus_score = voting_strategy.measure_consensus(sample_predictions)
assert isinstance(consensus_score, (float, int))
assert 0 <= consensus_score <= 1
def test_prediction_diversity_analysis(self, voting_strategy, sample_predictions):
"""Test analysis of prediction diversity."""
diversity_metrics = voting_strategy.analyze_diversity(sample_predictions)
assert 'entropy' in diversity_metrics
assert 'agreement_ratio' in diversity_metrics
assert 'prediction_distribution' in diversity_metrics
@pytest.mark.performance
def test_voting_performance_large_dataset(self, voting_strategy):
"""Test voting performance with large number of predictions."""
import time
# Generate large dataset
large_predictions = []
for i in range(1000):
large_predictions.append({
'model': f'model_{i}',
'prediction': np.random.choice(['buy', 'sell', 'hold']),
'confidence': np.random.uniform(0.5, 1.0)
})
start_time = time.time()
result = voting_strategy.majority_vote(large_predictions)
processing_time = time.time() - start_time
assert result is not None
assert processing_time < 1.0 # Should complete within 1 second
def test_voting_strategy_serialization(self, voting_strategy):
"""Test serialization and deserialization of voting strategy."""
import json
# Test if strategy can be serialized (for saving/loading)
strategy_config = {
'method': 'weighted',
'weights': {'model_a': 0.4, 'model_b': 0.6},
'threshold': 0.5
}
serialized = json.dumps(strategy_config)
deserialized = json.loads(serialized)
assert deserialized['method'] == 'weighted'
assert deserialized['threshold'] == 0.5
def test_voting_with_missing_confidence(self, voting_strategy):
"""Test voting when some predictions lack confidence scores."""
mixed_predictions = [
{'prediction': 'buy', 'confidence': 0.8},
{'prediction': 'sell'}, # Missing confidence
{'prediction': 'buy', 'confidence': 0.7}
]
# Should handle missing confidence gracefully
result = voting_strategy.majority_vote(mixed_predictions)
assert 'decision' in result