Spaces:
Runtime error
Runtime error
m00913563 committed on
Commit ·
1ee78e5
1
Parent(s): a0e8e60
fix return format
Browse files- app.py +2 -2
- evaluator.py +2 -2
app.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
from fastapi import FastAPI, HTTPException
|
| 2 |
-
from models import CVExtracted, EvaModul, JobAndCV, ClassificationResult, InsertedLink
|
| 3 |
import os
|
| 4 |
from io import BytesIO
|
| 5 |
# import extractor
|
|
@@ -90,7 +90,7 @@ async def extract(link: InsertedLink):
|
|
| 90 |
dictresult = extractor_llm.predict(text)
|
| 91 |
return dictresult
|
| 92 |
|
| 93 |
-
@app.post("/eval", response_model=
|
| 94 |
async def eval(eva: EvaModul):
|
| 95 |
transcript = extractor_helper.extract_technical(eva.competences, eva.transcript)
|
| 96 |
return evaluator.evaluate_interview(competences=eva.competences, transcript=transcript)
|
|
|
|
| 1 |
from fastapi import FastAPI, HTTPException
|
| 2 |
+
from models import CVExtracted, EvaModul, JobAndCV, ClassificationResult, InsertedLink, EvalResult
|
| 3 |
import os
|
| 4 |
from io import BytesIO
|
| 5 |
# import extractor
|
|
|
|
| 90 |
dictresult = extractor_llm.predict(text)
|
| 91 |
return dictresult
|
| 92 |
|
| 93 |
+
@app.post("/eval", response_model=EvalResult)
|
| 94 |
async def eval(eva: EvaModul):
|
| 95 |
transcript = extractor_helper.extract_technical(eva.competences, eva.transcript)
|
| 96 |
return evaluator.evaluate_interview(competences=eva.competences, transcript=transcript)
|
evaluator.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
from openai import OpenAI
|
| 2 |
-
from models import Evaluations
|
| 3 |
from typing import List, Dict
|
| 4 |
import json
|
| 5 |
tags = {'AI': "This one is the competence description"} #list of competence to save, better to hit db.
|
|
@@ -203,7 +203,7 @@ def evaluate_interview(competences: list[str], transcript: list):
|
|
| 203 |
|
| 204 |
final_score = aggregate_scores(behavioral_scores, technical_scores)
|
| 205 |
|
| 206 |
-
return final_score
|
| 207 |
|
| 208 |
def aggregate_scores(b: list[int], t: list[int]):
|
| 209 |
total_score = 0
|
|
|
|
| 1 |
from openai import OpenAI
|
| 2 |
+
from models import Evaluations,EvalResult
|
| 3 |
from typing import List, Dict
|
| 4 |
import json
|
| 5 |
tags = {'AI': "This one is the competence description"} #list of competence to save, better to hit db.
|
|
|
|
| 203 |
|
| 204 |
final_score = aggregate_scores(behavioral_scores, technical_scores)
|
| 205 |
|
| 206 |
+
return EvalResult(final_score=final_score, behavioural=result)
|
| 207 |
|
| 208 |
def aggregate_scores(b: list[int], t: list[int]):
|
| 209 |
total_score = 0
|