# Spaces:
# Sleeping
# Sleeping
# (Hugging Face Spaces page status residue from extraction — not app code.)
import os
import time
import random
import sys
import subprocess
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel
# SETUP
try:
    import ollama
    from sentence_transformers import SentenceTransformer
    from sklearn.metrics.pairwise import cosine_similarity
except ImportError:
    # Best-effort bootstrap: install the missing third-party packages into the
    # running interpreter, then retry the imports.
    # NOTE(review): installing at import time is fragile in production — prefer
    # declaring these in requirements.txt; confirm this is intentional for the
    # Hugging Face Space.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama", "sentence-transformers", "scikit-learn"])
    import ollama
    from sentence_transformers import SentenceTransformer
    from sklearn.metrics.pairwise import cosine_similarity
# FastAPI application instance; route handlers below are meant to be
# registered against this object.
app = FastAPI(title="AI Interviewer API - Hugging Face", version="2.0")
print("⏳ Loading Judge Model...")
# Multilingual sentence-embedding model used to grade answers by semantic
# similarity in calculate_score_logic.
judge_model = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
# Name of the Ollama model used for every LLM prompt in this service.
OLLAMA_MODEL = "deepseek-r1:8b"
def ensure_ollama_model():
    """Start the Ollama server in the background and pull OLLAMA_MODEL.

    Side effects only: spawns ``ollama serve``, waits a fixed grace period
    for it to come up, then pulls the model so the first chat request does
    not block on a download.
    """
    print("Checking Ollama service...")
    # List-form argv instead of shell=True command strings: no shell parsing,
    # no injection surface, and a missing `ollama` binary fails loudly
    # (FileNotFoundError) instead of a silent shell 127 exit.
    subprocess.Popen(["ollama", "serve"])
    # Fixed wait for the server to start accepting connections.
    # NOTE(review): polling the Ollama API until it responds would be more
    # robust than a hard-coded sleep — confirm 5s is enough on this hardware.
    time.sleep(5)
    print(f"Pulling model {OLLAMA_MODEL}...")
    subprocess.run(["ollama", "pull", OLLAMA_MODEL])
    print("✅ Model ready!")
ensure_ollama_model()
# MODELS
class IntroRequest(BaseModel):
    """Request payload for evaluating a candidate's self-introduction."""
    candidate_intro: str  # the candidate's introduction text
    track: str            # interview track/subject area
    persona: str          # interviewer persona name (see get_persona_traits)
class QuestionRequest(BaseModel):
    """Request payload for generating the next interview question."""
    track: str                      # interview track/subject area
    difficulty: str                 # difficulty label injected into the prompt
    persona: str                    # interviewer persona (not read by generate_question)
    current_q_num: int              # index of the question being asked
    prev_questions: List[str] = []  # recent questions; last 3 are used to avoid repeats
class AnswerRequest(BaseModel):
    """Request payload for scoring a candidate's answer."""
    question: str      # the question that was asked
    ideal_answer: str  # reference answer to compare against
    user_answer: str   # the candidate's answer
    time_taken: float  # seconds the candidate took to answer
    time_limit: int    # allotted seconds for the question
class FeedbackRequest(BaseModel):
    """Request payload for one-sentence feedback on a scored answer."""
    persona: str      # interviewer persona used as the LLM role
    score: float      # score already computed for the answer
    question: str     # the question that was asked
    user_answer: str  # the candidate's answer
class SummaryItem(BaseModel):
    """One scored question in the end-of-interview summary."""
    score: float  # score for this question
    q: str        # the question text
class SummaryRequest(BaseModel):
    """Request payload for the end-of-interview summary."""
    track: str                  # interview track/subject area
    results: List[SummaryItem]  # per-question scores to average
# HELPERS
def get_persona_traits(persona):
    """Map an interviewer persona name to a short trait description."""
    traits_by_persona = {
        "Friendly": "supportive, smiling",
        "Aggressive": "strict, impatient",
    }
    # Any unrecognized persona falls back to the neutral professional default.
    return traits_by_persona.get(persona, "formal, neutral")
def calculate_score_logic(ref, user_ans):
    """Score user_ans against ref on a 0-10 scale via embedding cosine similarity."""
    # A blank answer scores zero without touching the embedding model.
    if not user_ans.strip():
        return 0.0
    ref_vec, ans_vec = judge_model.encode([ref, user_ans])
    similarity = cosine_similarity([ref_vec], [ans_vec])[0][0]
    # Scale to 0-10, clamp negatives to 0, round to 2 decimals.
    scaled = max(0, similarity * 10)
    return round(float(scaled), 2)
# ENDPOINTS
def home():
    """Health-check payload: service status and the active LLM model name.

    NOTE(review): no @app.get("/") decorator is visible on this handler —
    confirm the route is actually registered (the decorator may have been
    lost in formatting).
    """
    return {"status": "Online on Hugging Face", "model": OLLAMA_MODEL}
def analyze_intro(req: IntroRequest):
    """Ask the LLM to evaluate the candidate's introduction in persona.

    Returns {"feedback": <short text>}; falls back to a generic message when
    the LLM call fails.
    NOTE(review): no @app.post decorator is visible — confirm route registration.
    """
    traits = get_persona_traits(req.persona)
    prompt = f"Act as {traits} interviewer for {req.track}. Evaluate intro: '{req.candidate_intro}'. Max 2 sentences."
    try:
        res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}])
        # deepseek-r1 emits a <think>...</think> preamble; keep only the answer.
        text = res['message']['content'].split("</think>")[-1].strip()
        return {"feedback": text.replace('"', '')}
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any LLM failure degrades to a canned reply.
        return {"feedback": "Proceeding."}
| def generate_question(req: QuestionRequest): | |
| base_time = 45 | |
| hist = "\n".join(req.prev_questions[-3:]) if req.prev_questions else "None" | |
| prompt = f"Ask 1 verbal question about {req.track} ({req.difficulty}). Prev: {hist}. Format: Emotion: [] Question: [] Answer: []" | |
| try: | |
| res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}]) | |
| full = res['message']['content'].split("</think>")[-1].strip() | |
| q, a, e = "Error", "Gen", "Neutral" | |
| for l in full.split('\n'): | |
| if "Question:" in l: q = l.split("Question:")[1].strip() | |
| elif "Answer:" in l: a = l.split("Answer:")[1].strip() | |
| elif "Emotion:" in l: e = l.split("Emotion:")[1].strip() | |
| if q != "Error": return {"question": q, "ideal_answer": a, "emotion": e, "time_limit": base_time} | |
| except: pass | |
| return {"question": f"Explain a core concept in {req.track}.", "ideal_answer": "General", "emotion": "Neutral", "time_limit": 45} | |
def evaluate_answer(req: AnswerRequest):
    """Score an answer and apply a timing bonus/penalty for decent answers."""
    base = calculate_score_logic(req.ideal_answer, req.user_answer)
    adjusted = base
    note = ""
    # Timing only matters once the answer itself is at least passable (>= 5).
    if base >= 5:
        fast_cutoff = req.time_limit * 0.7
        slow_cutoff = req.time_limit * 1.5
        if req.time_taken < fast_cutoff:
            adjusted += 0.5
            note = "Speed Bonus"
        elif req.time_taken > slow_cutoff:
            adjusted -= 0.5
            note = "Late Penalty"
    # Final score stays clamped to the 0-10 range.
    clamped = min(10, max(0, adjusted))
    return {"base_score": base, "final_score": clamped, "bonus_message": note}
| def generate_feedback(req: FeedbackRequest): | |
| prompt = f"Role: {req.persona}. Q: {req.question}. Ans: {req.user_answer}. Score: {req.score}. 1 sentence feedback." | |
| try: | |
| res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}]) | |
| return {"feedback": res['message']['content'].split("</think>")[-1].strip().replace('"','')} | |
| except: return {"feedback": "Good effort."} | |
| def generate_summary(req: SummaryRequest): | |
| avg = sum(r.score for r in req.results) / len(req.results) | |
| prompt = f"Summary for {req.track}. Avg: {avg:.1f}. 30 words max." | |
| try: | |
| res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}]) | |
| return {"summary": res['message']['content'].split("</think>")[-1].strip().replace('"','')} | |
| except: return {"summary": "Good job."} | |