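"""AI Interviewer API.

A FastAPI service that drives a mock-interview loop: a local Ollama model
(deepseek-r1:8b) generates questions and persona-flavored feedback, while a
multilingual sentence-transformers model scores candidate answers by semantic
similarity. Built to run as a Hugging Face Space.
"""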
import sys
import time
import subprocess
from typing import List

from fastapi import FastAPI
from pydantic import BaseModel

# SETUP
try:
    import ollama
    from sentence_transformers import SentenceTransformer
    from sklearn.metrics.pairwise import cosine_similarity
except ImportError:
    # Install the heavyweight dependencies on first run if they are missing.
    subprocess.check_call([
        sys.executable, "-m", "pip", "install",
        "ollama", "sentence-transformers", "scikit-learn",
    ])
    import ollama
    from sentence_transformers import SentenceTransformer
    from sklearn.metrics.pairwise import cosine_similarity

app = FastAPI(title="AI Interviewer API - Hugging Face", version="2.0")
print("⏳ Loading Judge Model...")
judge_model = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
OLLAMA_MODEL = "deepseek-r1:8b"
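
# Note: deepseek-r1 is a reasoning model that wraps its chain of thought in
# <think>...</think> tags; every handler below splits on "</think>" to keep
# only the final answer.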
def ensure_ollama_model():
    """Start the Ollama server in the background and pull the chat model."""
    print("Checking Ollama service...")
    subprocess.Popen("ollama serve", shell=True)
    time.sleep(5)  # crude wait for the server to come up
    print(f"Pulling model {OLLAMA_MODEL}...")
    subprocess.run(f"ollama pull {OLLAMA_MODEL}", shell=True)
    print("✅ Model ready!")

ensure_ollama_model()
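# Note: this assumes the `ollama` CLI is already installed in the container
# image. The fixed 5-second sleep is a crude readiness check; a slow host may
# need a longer delay or a polling loop against the Ollama API instead.
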
# MODELS
class IntroRequest(BaseModel):
    candidate_intro: str
    track: str
    persona: str

class QuestionRequest(BaseModel):
    track: str
    difficulty: str
    persona: str
    current_q_num: int
    prev_questions: List[str] = []

class AnswerRequest(BaseModel):
    question: str
    ideal_answer: str
    user_answer: str
    time_taken: float
    time_limit: int

class FeedbackRequest(BaseModel):
    persona: str
    score: float
    question: str
    user_answer: str

class SummaryItem(BaseModel):
    score: float
    q: str

class SummaryRequest(BaseModel):
    track: str
    results: List[SummaryItem]

# HELPERS
def get_persona_traits(persona):
    if persona == "Friendly":
        return "supportive, smiling"
    elif persona == "Aggressive":
        return "strict, impatient"
    return "formal, neutral"

def calculate_score_logic(ref, user_ans):
    if not user_ans.strip():
        return 0.0
    emb = judge_model.encode([ref, user_ans])
    similarity = cosine_similarity([emb[0]], [emb[1]])[0][0]
    return round(float(max(0, similarity * 10)), 2)
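
# The score is the raw cosine similarity between the two sentence embeddings,
# rescaled from [0, 1] to [0, 10] (negative similarities clamp to 0). As a
# rough illustration: a close paraphrase of the reference answer should land
# high on the scale, while an unrelated answer should land near zero.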

# ENDPOINTS
@app.get("/")
def home():
    return {"status": "Online on Hugging Face", "model": OLLAMA_MODEL}

@app.post("/analyze_intro")
def analyze_intro(req: IntroRequest):
traits = get_persona_traits(req.persona)
prompt = f"Act as {traits} interviewer for {req.track}. Evaluate intro: '{req.candidate_intro}'. Max 2 sentences."
try:
res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}])
text = res['message']['content'].split("</think>")[-1].strip()
return {"feedback": text.replace('"', '')}
except: return {"feedback": "Proceeding."}
@app.post("/generate_question")
def generate_question(req: QuestionRequest):
base_time = 45
hist = "\n".join(req.prev_questions[-3:]) if req.prev_questions else "None"
prompt = f"Ask 1 verbal question about {req.track} ({req.difficulty}). Prev: {hist}. Format: Emotion: [] Question: [] Answer: []"
try:
res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}])
full = res['message']['content'].split("</think>")[-1].strip()
q, a, e = "Error", "Gen", "Neutral"
for l in full.split('\n'):
if "Question:" in l: q = l.split("Question:")[1].strip()
elif "Answer:" in l: a = l.split("Answer:")[1].strip()
elif "Emotion:" in l: e = l.split("Emotion:")[1].strip()
if q != "Error": return {"question": q, "ideal_answer": a, "emotion": e, "time_limit": base_time}
except: pass
return {"question": f"Explain a core concept in {req.track}.", "ideal_answer": "General", "emotion": "Neutral", "time_limit": 45}
@app.post("/evaluate_answer")
def evaluate_answer(req: AnswerRequest):
score = calculate_score_logic(req.ideal_answer, req.user_answer)
final = score
msg = ""
if score >= 5:
if req.time_taken < req.time_limit * 0.7: final += 0.5; msg = "Speed Bonus"
elif req.time_taken > req.time_limit * 1.5: final -= 0.5; msg = "Late Penalty"
return {"base_score": score, "final_score": max(0, min(10, final)), "bonus_message": msg}
@app.post("/generate_feedback")
def generate_feedback(req: FeedbackRequest):
prompt = f"Role: {req.persona}. Q: {req.question}. Ans: {req.user_answer}. Score: {req.score}. 1 sentence feedback."
try:
res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}])
return {"feedback": res['message']['content'].split("</think>")[-1].strip().replace('"','')}
except: return {"feedback": "Good effort."}
@app.post("/generate_summary")
def generate_summary(req: SummaryRequest):
avg = sum(r.score for r in req.results) / len(req.results)
prompt = f"Summary for {req.track}. Avg: {avg:.1f}. 30 words max."
try:
res = ollama.chat(model=OLLAMA_MODEL, messages=[{'role': 'user', 'content': prompt}])
return {"summary": res['message']['content'].split("</think>")[-1].strip().replace('"','')}
except: return {"summary": "Good job."}