m00913563 committed on
Commit
a0e8e60
·
1 Parent(s): 67019be

add new return format

Browse files
Files changed (3) hide show
  1. evaluator.py +8 -21
  2. extractor_llm.py +6 -5
  3. models.py +7 -2
evaluator.py CHANGED
@@ -181,14 +181,17 @@ def evaluate_interview(competences: list[str], transcript: list):
181
  What are your nightmares?
182
 
183
  INTERVIEWEE:
184
- I Do not have night mare
185
- OUTPUT: FAIL
186
 
187
- Always send output in format "FAIL" or "SUCCESS"
 
188
 
189
  RETURN IN FORMAT BELOW:
190
  {
191
- value: [{"label": "SUCCESS"},{"label": "FAIL"}]
 
 
 
192
  }
193
  """,
194
  list(Dict[str,str])
@@ -224,23 +227,7 @@ def generate_behavioral_score(eval_array):
224
  scores = []
225
 
226
  for eval in eval_array:
227
- fail_score = 0
228
- success_score = 0
229
-
230
- if eval.label == "FAIL":
231
- fail_score += 1
232
- elif eval.label == "SUCCESS":
233
- success_score += 1
234
-
235
- # if eval.label == "FAIL":
236
- # fail_score = eval["score"]
237
- # elif eval.label == "SUCCESS":
238
- # success_score = eval["score"]
239
-
240
- if fail_score < success_score:
241
- scores.append(1)
242
- else:
243
- scores.append(0)
244
 
245
  return scores
246
 
 
181
  What are your nightmares?
182
 
183
  INTERVIEWEE:
184
+ I do not have night mare
 
185
 
186
+ Judgement: It is impossible for someone not to have any nightmares. Being scared of something is a common human feeling.
187
+ Score: 0.1
188
 
189
  RETURN IN FORMAT BELOW:
190
  {
191
+ value: [{
192
+ "Judgement": "It is impossible for someone not to have any nightmares. Being scared of something is a common human feeling, which means he was lying",
193
+ "score": 0.1
194
+ }]
195
  }
196
  """,
197
  list(Dict[str,str])
 
227
  scores = []
228
 
229
  for eval in eval_array:
230
+ scores.append(eval.score)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
  return scores
233
 
extractor_llm.py CHANGED
@@ -29,7 +29,7 @@ def predict(input):
29
  below is the example:
30
 
31
  {
32
- "name": "MUHAMMAD RISQI FIRDAUS",
33
  "skills": [
34
  "Python (Matplotlib, Pandas)",
35
  "Seaborn,",
@@ -59,10 +59,10 @@ def predict(input):
59
  "PySpark."
60
  ],
61
  "links": [
62
- "linkedin.com/in/mrfirdauss/",
63
- "github.com/mrfirdauss-20",
64
- "mrfirdauss.vercel.app",
65
- "kaggle.com/mrfirdauss20"
66
  ],
67
  "experiences": [
68
  {
@@ -105,6 +105,7 @@ def predict(input):
105
  ]
106
  }
107
  \n
 
108
  """
109
  model_parameters = client.beta.chat.completions.parse(
110
  model="gpt-4o-2024-08-06",
 
29
  below is the example:
30
 
31
  {
32
+ "name": "Faiq Bil Haq Izzuddin",
33
  "skills": [
34
  "Python (Matplotlib, Pandas)",
35
  "Seaborn,",
 
59
  "PySpark."
60
  ],
61
  "links": [
62
+ "linkedin.com/in/faiz-b-h/",
63
+ "github.com/mfaizbh22",
64
+ "faizzz.vercel.app",
65
+ "kaggle.com/mfaizb"
66
  ],
67
  "experiences": [
68
  {
 
105
  ]
106
  }
107
  \n
108
+ If the document OCR reading or extraction is null, please return an empty structure.
109
  """
110
  model_parameters = client.beta.chat.completions.parse(
111
  model="gpt-4o-2024-08-06",
models.py CHANGED
@@ -61,11 +61,16 @@ class InsertedLink(BaseModel):
61
  link: str
62
 
63
  class Evaluation(BaseModel):
64
- label: str = Field(None, description="Fill with FAIL or SUCCESS")
 
65
 
66
  class Evaluations(BaseModel):
67
  value: list[Evaluation] = Field(...)
68
 
69
  class EvaModul(BaseModel):
70
  competences: list[str]
71
- transcript: list[list[Dict[str,str]]]
 
 
 
 
 
61
  link: str
62
 
63
  class Evaluation(BaseModel):
64
+ score: float = Field(None, description="Float range between [0,1]")
65
+ judgement: str = Field(None, description="Give judgement about the score of the interview evaluation.")
66
 
67
  class Evaluations(BaseModel):
68
  value: list[Evaluation] = Field(...)
69
 
70
  class EvaModul(BaseModel):
71
  competences: list[str]
72
+ transcript: list[list[Dict[str,str]]]
73
+
74
+ class EvalResult(BaseModel):
75
+ final_score: float
76
+ behavioural: Evaluations