# FraudSimulator-AI / agents/risk_scoring_agent.py
# Author: Bader Alabddan
# Complete FraudSimulator-AI vertical (commit 7f10b99)
"""Risk Scoring Agent - Combines signals into final fraud risk score."""
from datetime import datetime, timezone
from typing import Dict, List, Any
class RiskScoringAgent:
    """Combines pattern and anomaly signals into a final fraud risk score.

    The score is a weighted average of the upstream agents' outputs
    (patterns weighted more heavily than anomalies by default), mapped to
    a risk level and a recommended action.
    """

    # Risk-level cutoffs applied to the weighted final score.
    HIGH_RISK_THRESHOLD = 0.7
    MEDIUM_RISK_THRESHOLD = 0.4

    def __init__(
        self,
        pattern_weight: float = 0.6,
        anomaly_weight: float = 0.4,
        investigation_threshold: float = 0.7,
    ) -> None:
        """Initialize the agent.

        Args:
            pattern_weight: Weight applied to the pattern score (default 0.6).
            anomaly_weight: Weight applied to the anomaly score (default 0.4).
            investigation_threshold: Minimum final score that flags the
                transaction for investigation (default 0.7, matching the
                high-risk cutoff).
        """
        self.name = "RiskScoringAgent"
        self.version = "1.0"
        self.pattern_weight = pattern_weight
        self.anomaly_weight = anomaly_weight
        self.investigation_threshold = investigation_threshold

    def process(
        self,
        pattern_results: Dict[str, Any],
        anomaly_results: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Calculate the final fraud risk score.

        Args:
            pattern_results: Output of the pattern-detection agent; reads
                "overall_pattern_score" (float, defaults to 0.0) and
                "detected_patterns" (list, defaults to empty).
            anomaly_results: Output of the anomaly-detection agent; reads
                "anomaly_score" (float, defaults to 0.0) and
                "anomalies_detected" (list, defaults to empty).

        Returns:
            Dict with "fraud_score", "risk_level", "fraud_indicators",
            "recommended_action", "requires_investigation", "confidence".
        """
        pattern_score = pattern_results.get("overall_pattern_score", 0.0)
        anomaly_score = anomaly_results.get("anomaly_score", 0.0)

        # Weighted combination of the two upstream signals.
        final_score = (
            pattern_score * self.pattern_weight
            + anomaly_score * self.anomaly_weight
        )

        # Map the score onto a risk tier and its recommended action.
        if final_score >= self.HIGH_RISK_THRESHOLD:
            risk_level = "high"
            recommended_action = "immediate_investigation"
        elif final_score >= self.MEDIUM_RISK_THRESHOLD:
            risk_level = "medium"
            recommended_action = "enhanced_review"
        else:
            risk_level = "low"
            recommended_action = "standard_processing"

        # Merge indicators from both agents (patterns first).
        fraud_indicators: List[Any] = []
        fraud_indicators.extend(pattern_results.get("detected_patterns", []))
        fraud_indicators.extend(anomaly_results.get("anomalies_detected", []))

        return {
            "fraud_score": final_score,
            "ris_level" if False else "risk_level": risk_level,
            "fraud_indicators": fraud_indicators,
            "recommended_action": recommended_action,
            "requires_investigation": final_score >= self.investigation_threshold,
            # NOTE(review): confidence is a fixed placeholder, as in the
            # original implementation — not derived from the inputs.
            "confidence": 0.88,
        }

    def get_trace(self) -> Dict[str, Any]:
        """Return an execution-trace record for this agent.

        The timestamp is the actual current UTC time (the previous
        implementation returned a hard-coded placeholder).
        """
        return {
            "agent": self.name,
            "version": self.version,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "status": "completed",
        }