| """ | |
| Synapse AI System for FitScore Feedback Agent | |
| """ | |
| import uuid | |
| from datetime import datetime | |
| from typing import Dict, Any, Optional | |
| from sqlalchemy.orm import Session | |
| from .database import get_db, CandidateSubmission, LearningEvent, ActiveLearningRequest, FitScoreModel | |


class SynapseAISystem:
    """Synapse AI system for advanced candidate evaluation and learning"""

    def __init__(self):
        self.model_version = "v1.0"
        self.confidence_threshold = 0.8
        # Results with uncertainty above this value are flagged for human review.
        self.uncertainty_threshold = 0.3

    async def calculate_fitscore(self, candidate_data: Dict[str, Any], job_data: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate FitScore with confidence intervals and uncertainty handling"""
        try:
            # Extract candidate information
            skills = candidate_data.get("skills", [])
            years_experience = candidate_data.get("years_experience", 0)
            education_level = candidate_data.get("education_level", "Bachelors")
            company_size = candidate_data.get("company_size", "Medium")

            # Extract job requirements
            required_skills = job_data.get("required_skills", [])
            required_experience = job_data.get("required_experience", 0)
            company_size_job = job_data.get("company_size", "Medium")

            # Calculate category scores
            skills_match = self._calculate_skills_match(skills, required_skills)
            experience_match = self._calculate_experience_match(years_experience, required_experience)
            education_score = self._calculate_education_score(education_level)
            company_relevance = self._calculate_company_relevance(company_size, company_size_job)

            # Calculate weighted FitScore
            weights = {
                "skills": 0.35,
                "experience": 0.25,
                "education": 0.20,
                "company_relevance": 0.20
            }
            fitscore = (
                skills_match * weights["skills"] +
                experience_match * weights["experience"] +
                education_score * weights["education"] +
                company_relevance * weights["company_relevance"]
            ) * 10.0  # Scale to 0-10
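            # Worked example (hypothetical category scores): skills_match=0.8,
            # experience_match=1.0, education=0.7, company_relevance=0.9 gives
            # (0.8*0.35 + 1.0*0.25 + 0.7*0.20 + 0.9*0.20) * 10 = 8.50.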

            # Calculate confidence and uncertainty
            confidence = self._calculate_confidence(skills_match, experience_match, education_score, company_relevance)
            uncertainty = self._calculate_uncertainty(candidate_data, job_data)

            # Determine verdict
            verdict = self._determine_verdict(fitscore, confidence)

            # Generate detailed analysis
            analysis = self._generate_analysis(candidate_data, job_data, fitscore, confidence, uncertainty)

            return {
                "fitscore": round(fitscore, 2),
                "confidence": round(confidence, 3),
                "uncertainty": round(uncertainty, 3),
                "verdict": verdict,
                "category_scores": {
                    "skills_match": round(skills_match * 10, 2),
                    "experience_match": round(experience_match * 10, 2),
                    "education": round(education_score * 10, 2),
                    "company_relevance": round(company_relevance * 10, 2)
                },
                "analysis": analysis,
                "model_version": self.model_version,
                "requires_human_review": uncertainty > self.uncertainty_threshold
            }
        except Exception as e:
            print(f"❌ Error calculating FitScore: {e}")
            raise

    async def submit_candidate(self, candidate_data: Dict[str, Any], job_data: Dict[str, Any],
                               recruiter_id: str) -> Dict[str, Any]:
        """Submit candidate with FitScore validation"""
        try:
            # Calculate FitScore
            fitscore_result = await self.calculate_fitscore(candidate_data, job_data)

            # Check if candidate meets minimum threshold
            if fitscore_result["fitscore"] < 8.2:
                return {
                    "success": False,
                    "message": f"Candidate FitScore ({fitscore_result['fitscore']}) below minimum threshold (8.2)",
                    "fitscore": fitscore_result["fitscore"],
                    "verdict": "Reject - Below Threshold"
                }

            # Store submission
            db = next(get_db())
            submission_id = str(uuid.uuid4())
            submission = CandidateSubmission(
                submission_id=submission_id,
                candidate_id=candidate_data.get("candidate_id", str(uuid.uuid4())),
                job_id=job_data.get("job_id", str(uuid.uuid4())),
                recruiter_id=recruiter_id,
                # The model appears to define both a fit_score and a fitscore
                # column; both are populated with the same value here.
                fit_score=fitscore_result["fitscore"],
                fitscore=fitscore_result["fitscore"],
                fitscore_confidence=fitscore_result["confidence"],
                fitscore_model_version=fitscore_result["model_version"],
                uncertainty_score=fitscore_result["uncertainty"],
                status="submitted",
                outcome="pending"
            )
            db.add(submission)
            db.commit()

            # Create learning event
            learning_event = LearningEvent(
                event_id=str(uuid.uuid4()),
                event_type="synapse_submission",
                candidate_id=candidate_data.get("candidate_id"),
                job_id=job_data.get("job_id"),
                submission_id=submission_id,
                input_data={"candidate_data": candidate_data, "job_data": job_data},
                outcome_data={"fitscore": fitscore_result["fitscore"], "verdict": fitscore_result["verdict"]},
                confidence_score=fitscore_result["confidence"],
                model_version=self.model_version,
                learning_signal=0.0,
                processed=False
            )
            db.add(learning_event)
            db.commit()

            return {
                "success": True,
                "submission_id": submission_id,
                "fitscore": fitscore_result["fitscore"],
                "confidence": fitscore_result["confidence"],
                "verdict": fitscore_result["verdict"],
                "requires_human_review": fitscore_result["requires_human_review"],
                "message": "Candidate submitted successfully"
            }
        except Exception as e:
            print(f"❌ Error submitting candidate: {e}")
            raise

    async def record_outcome(self, submission_id: str, outcome: str, feedback: str, notes: str) -> Dict[str, Any]:
        """Record submission outcome and trigger learning events"""
        try:
            db = next(get_db())

            # Find submission
            submission = db.query(CandidateSubmission).filter(
                CandidateSubmission.submission_id == submission_id
            ).first()
            if not submission:
                raise ValueError("Submission not found")

            # Update submission
            submission.outcome = outcome
            submission.outcome_notes = notes
            submission.outcome_date = datetime.utcnow()

            # Create learning event
            learning_event = LearningEvent(
                event_id=str(uuid.uuid4()),
                event_type="synapse_outcome",
                candidate_id=submission.candidate_id,
                job_id=submission.job_id,
                submission_id=submission_id,
                input_data={"original_fitscore": submission.fitscore, "confidence": submission.fitscore_confidence},
                outcome_data={"outcome": outcome, "feedback": feedback},
                confidence_score=submission.fitscore_confidence,
                model_version=self.model_version,
                learning_signal=self._calculate_learning_signal(outcome, submission.fitscore),
                processed=False
            )
            db.add(learning_event)
            db.commit()

            return {
                "success": True,
                "submission_id": submission_id,
                "outcome": outcome,
                "learning_signal": learning_event.learning_signal,
                "message": "Outcome recorded and learning event created"
            }
        except Exception as e:
            print(f"❌ Error recording outcome: {e}")
            raise

    async def get_system_analytics(self) -> Dict[str, Any]:
        """Get comprehensive Synapse AI system analytics"""
        try:
            db = next(get_db())

            total_submissions = db.query(CandidateSubmission).count()
            successful_submissions = db.query(CandidateSubmission).filter(
                CandidateSubmission.outcome == "hired"
            ).count()
            success_rate = (successful_submissions / total_submissions * 100) if total_submissions > 0 else 0

            # Calculate average FitScore across all scored submissions
            fitscore_rows = db.query(CandidateSubmission.fitscore).filter(
                CandidateSubmission.fitscore.isnot(None)
            ).all()
            avg_fitscore = sum(row[0] for row in fitscore_rows) / len(fitscore_rows) if fitscore_rows else 0

            # Get model performance
            models = db.query(FitScoreModel).order_by(FitScoreModel.created_at.desc()).limit(5).all()

            return {
                "total_submissions": total_submissions,
                "successful_submissions": successful_submissions,
                "success_rate": round(success_rate, 2),
                "average_fitscore": round(avg_fitscore, 2),
                "model_performance": [
                    {
                        "version": model.version,
                        "accuracy": model.accuracy,
                        "precision": model.precision,
                        "recall": model.recall,
                        "f1_score": model.f1_score,
                        "training_data_count": model.training_data_count
                    }
                    for model in models
                ],
                "system_metrics": {
                    "confidence_threshold": self.confidence_threshold,
                    "uncertainty_threshold": self.uncertainty_threshold,
                    "model_version": self.model_version
                }
            }
        except Exception as e:
            print(f"❌ Error getting system analytics: {e}")
            return {
                "total_submissions": 0,
                "successful_submissions": 0,
                "success_rate": 0,
                "average_fitscore": 0,
                "model_performance": [],
                "system_metrics": {
                    "confidence_threshold": self.confidence_threshold,
                    "uncertainty_threshold": self.uncertainty_threshold,
                    "model_version": self.model_version
                }
            }

    def _calculate_skills_match(self, candidate_skills: list, required_skills: list) -> float:
        """Calculate skills match score"""
        if not required_skills:
            return 0.5  # Default score if no requirements specified

        matching_skills = set(candidate_skills) & set(required_skills)
        match_ratio = len(matching_skills) / len(required_skills)

        # Bonus for having more skills than required (never a penalty for fewer)
        bonus = max(0.0, min(len(candidate_skills) / len(required_skills) - 1, 0.2))

        return min(match_ratio + bonus, 1.0)

    def _calculate_experience_match(self, candidate_years: int, required_years: int) -> float:
        """Calculate experience match score"""
        if required_years == 0:
            return 0.5  # Default score if no experience requirement specified

        if candidate_years >= required_years:
            # Full score for meeting or exceeding requirements
            return 1.0
        # Partial score proportional to how close the candidate is
        return max(0.0, candidate_years / required_years)

    def _calculate_education_score(self, education_level: str) -> float:
        """Calculate education score"""
        education_scores = {
            "PhD": 1.0,
            "Masters": 0.9,
            "Bachelors": 0.7,
            "Associate": 0.5,
            "High School": 0.3
        }
        return education_scores.get(education_level, 0.5)

    def _calculate_company_relevance(self, candidate_company: str, job_company: str) -> float:
        """Calculate company relevance score"""
        # Simplified company-size relevance calculation
        if candidate_company == job_company:
            return 1.0
        if candidate_company in ["Fortune 500", "Large"] and job_company in ["Fortune 500", "Large"]:
            return 0.9
        return 0.6

    def _calculate_confidence(self, skills_match: float, experience_match: float,
                              education_score: float, company_relevance: float) -> float:
        """Calculate confidence level"""
        # Higher confidence when all category scores are consistent
        scores = [skills_match, experience_match, education_score, company_relevance]
        mean_score = sum(scores) / len(scores)
        variance = sum((score - mean_score) ** 2 for score in scores) / len(scores)

        # Lower variance means higher confidence, floored at 0.5
        return max(0.5, 1.0 - variance)

    def _calculate_uncertainty(self, candidate_data: Dict[str, Any], job_data: Dict[str, Any]) -> float:
        """Calculate uncertainty score"""
        # Higher uncertainty when data is missing or inconsistent
        uncertainty_factors = []

        # Missing candidate data (an explicit 0 still counts as provided)
        if not candidate_data.get("skills"):
            uncertainty_factors.append(0.3)
        if candidate_data.get("years_experience") is None:
            uncertainty_factors.append(0.2)
        if not candidate_data.get("education_level"):
            uncertainty_factors.append(0.2)

        # Missing job data
        if not job_data.get("required_skills"):
            uncertainty_factors.append(0.3)
        if job_data.get("required_experience") is None:
            uncertainty_factors.append(0.2)

        # Average the factors; a small baseline uncertainty always remains
        total_uncertainty = sum(uncertainty_factors) / len(uncertainty_factors) if uncertainty_factors else 0.1
        return min(total_uncertainty, 1.0)

    def _determine_verdict(self, fitscore: float, confidence: float) -> str:
        """Determine verdict based on FitScore and confidence"""
        if fitscore >= 8.5 and confidence >= 0.8:
            return "Strong Accept"
        elif fitscore >= 7.0:
            return "Accept"
        elif fitscore >= 5.0:
            return "Consider"
        else:
            return "Reject"

    def _generate_analysis(self, candidate_data: Dict[str, Any], job_data: Dict[str, Any],
                           fitscore: float, confidence: float, uncertainty: float) -> str:
        """Generate detailed analysis"""
        analysis = "FitScore Analysis:\n"
        analysis += f"- Score: {fitscore:.2f}/10\n"
        analysis += f"- Confidence: {confidence:.1%}\n"
        analysis += f"- Uncertainty: {uncertainty:.1%}\n\n"

        analysis += "Candidate Profile:\n"
        analysis += f"- Skills: {', '.join(candidate_data.get('skills', []))}\n"
        analysis += f"- Experience: {candidate_data.get('years_experience', 0)} years\n"
        analysis += f"- Education: {candidate_data.get('education_level', 'Not specified')}\n\n"

        analysis += "Job Requirements:\n"
        analysis += f"- Required Skills: {', '.join(job_data.get('required_skills', []))}\n"
        analysis += f"- Required Experience: {job_data.get('required_experience', 0)} years\n"

        if uncertainty > self.uncertainty_threshold:
            analysis += "\n⚠️ High uncertainty detected. Human review recommended."

        return analysis

    def _calculate_learning_signal(self, outcome: str, original_fitscore: float) -> float:
        """Calculate learning signal based on outcome"""
        if outcome == "hired":
            # Positive signal when the FitScore was already high
            return max(0.0, (original_fitscore - 7.0) / 3.0)
        elif outcome == "rejected":
            # Negative signal when a high FitScore led to a rejection
            return min(0.0, (7.0 - original_fitscore) / 3.0)
        else:
            return 0.0  # Neutral for other outcomes
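

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): exercises calculate_fitscore with
# hypothetical candidate/job payloads. The field names mirror the .get() keys
# used above; the sample values are invented. calculate_fitscore touches no
# database, so this runs standalone; because of the relative import at the
# top, invoke it as a module (python -m <package>.<module>).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    demo_candidate = {
        "skills": ["python", "sql", "aws"],
        "years_experience": 6,
        "education_level": "Masters",
        "company_size": "Large",
    }
    demo_job = {
        "required_skills": ["python", "sql"],
        "required_experience": 4,
        "company_size": "Large",
    }

    system = SynapseAISystem()
    result = asyncio.run(system.calculate_fitscore(demo_candidate, demo_job))
    print(f"FitScore: {result['fitscore']} | Verdict: {result['verdict']}")
    print(result["analysis"])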