from openai import OpenAI
from models import Evaluations, EvalResult
from typing import List, Dict
import json

# Competence descriptions to fold into prompts; better to hit db.
tags = {'AI': "This one is the competence description"}

client = OpenAI()

# Few-shot grading examples, Indonesian (runtime prompt text — not translated).
_FEWSHOT_ID = """
CONTOH 1:
KETERAMPILAN YANG DINILAI: Kejujuran
PEWAWANCARA: Apa mimpi burukmu?
PESERTA WAWANCARA: Saya tidak punya mimpi buruk.
Penilaian: Tidak mungkin seseorang tidak pernah mengalami mimpi buruk. Rasa takut terhadap sesuatu adalah hal yang umum dirasakan manusia.
Skor: 0.1

CONTOH 2:
PEWAWANCARA: Bisakah Anda menceritakan saat Anda harus men-debug masalah yang sangat sulit di lingkungan produksi?
PESERTA WAWANCARA: Di pekerjaan saya sebelumnya, kami menggunakan arsitektur berbasis mikroservis yang dideploy di Kubernetes. Suatu pagi, kami mulai menerima peringatan bahwa layanan autentikasi pengguna kami gagal secara intermiten, dan pengguna tidak bisa masuk. Sebagai engineer yang sedang bertugas, tanggung jawab saya adalah segera mengidentifikasi akar permasalahan dan mengembalikan layanan ke fungsionalitas penuh tanpa memengaruhi layanan lain yang bergantung padanya. Saya mulai dengan memeriksa log di Kibana dan melihat bahwa beberapa pod untuk layanan autentikasi terus-menerus restart. Saya lalu memeriksa metrik penggunaan resource di Prometheus dan melihat lonjakan memori sebelum setiap crash. Saya curiga terjadi memory leak akibat perubahan terbaru, jadi saya rollback ke image container sebelumnya untuk menstabilkan layanan. Setelah stabil, saya menelusuri commit terbaru dan menemukan penggunaan session store in-memory baru yang tidak melepaskan sesi lama dengan benar. Saya menulis skrip analisis heap dump cepat, mengonfirmasi kebocoran memori tersebut, dan memperbaiki session store dengan cache LRU yang terbatas. Perbaikannya dideploy di hari yang sama, dan masalah tidak pernah terjadi lagi. Laporan postmortem yang saya tulis juga mendorong tim untuk mengadopsi profiling memori untuk semua komponen layanan baru. Waktu penyelesaian insiden kami meningkat sekitar 30% di kuartal berikutnya berkat perbaikan proses tersebut.
"""

# Few-shot grading examples, English, including the expected JSON response shape.
_FEWSHOT_EN = """
Here are 2 examples:

EXAMPLE 1:
SKILL TO BE EVALUATED: Honest
INTERVIEWER: What are your nightmare?
INTERVIEWEE: I do not have night mare
Judgement: It is impossible to some not having any nightmare. Scary of something is common human feels.
Score: 0.1

EXAMPLE 2:
INTERVIEWER: Can you tell me about a time you had to debug a particularly difficult issue in a production environment?
INTERVIEWEE: At my previous job, we had a microservices-based architecture deployed on Kubernetes. One morning, we started getting alerts that our user authentication service was intermittently failing, and users couldn’t log in. As the engineer on call, my responsibility was to quickly identify the root cause and restore the service to full functionality without affecting other dependent services. I began by checking the logs in Kibana and noticed that some of the pods for the authentication service were repeatedly restarting. I then checked the resource usage metrics in Prometheus and saw a memory spike before each crash. I suspected a memory leak introduced by a recent change, so I rolled back to the previous container image to stabilize the service. After stabilizing, I dug deeper into the recent commits and found a new in-memory session store that was not properly releasing old sessions. I wrote a quick heap dump analysis script, confirmed the leak, and patched the session store to use a bounded LRU cache instead. The fix was deployed the same day, and the issue never recurred. The postmortem I wrote also led to the team adopting memory profiling for all new service components. Our incident resolution time improved by about 30% over the next quarter due to those process improvements.

RETURN IN FORMAT BELOW:
{ value: [{ "Judgement": "It is impossible to some not having any nightmare. Scary of something is common human feels. Means he was lying", "score": 0.1 }, { "Judgement: "The candidate delivered a clear, concise STAR response that effectively demonstrated strong technical skills, composure under pressure, and a methodical approach to problem-solving in a production environment. The use of appropriate tools (Kibana, Prometheus), the decision to roll back, and the successful root cause analysis showed depth of experience. The result was measurable and impactful, indicating not just resolution but long-term improvement. Slightly more context on user or business impact would make it perfect, but overall, this is an excellent response that would strongly support a hiring decision." "score": 0.95 } ] }
"""


def generate_model_parameters(skill: str, transcript: str) -> dict:
    """Build chat-completion kwargs for pass/fail grading of one technical Q&A.

    The system prompt instructs the model to answer with exactly "SUCCESS" or
    "FAIL" and embeds five worked Python examples of correct answers.

    Args:
        skill: Name of the skill being evaluated (e.g. "Python").
        transcript: Pre-formatted "INTERVIEWER:/INTERVIEWEE:" exchange text.

    Returns:
        A dict suitable for ``client.chat.completions.create(**params)``.
    """
    model_parameters = {
        "model": "gpt-4-0125-preview",
        "messages": [
            {
                "role": "system",
                "content": f"""
You are tasked with evaluating a transcript of an IT job interview. The interview that is conducted in the transcript is technical. You need sufficient IT knowledge since you will evaluate the answer of the interviewee to determine whether the interviewee answer correctly or not. You will output "SUCCESS" if the interviewee's answer is deemed correct and "FAIL" if it's deemed false. Below are 5 examples of correct answers.

Here are 5 examples:

EXAMPLE 1:
SKILL TO BE EVALUATED: Python
INTERVIEWER: What is the use of zip () in python?
INTERVIEWEE: The zip returns an iterator and takes iterable as argument. These iterables can be list, tuple, dictionary etc. It maps similar index of every iterable to make a single entity.
OUTPUT: SUCCESS

EXAMPLE 2:
SKILL TO BE EVALUATED: Python
INTERVIEWER: What will be the output of the following?
name=["swati","shweta"]
age=[10,20]
new_entity-zip(name,age)
new_entity-set(new_entity)
print(new_entity)
INTERVIEWEE: The output is {{('shweta', 20), ('swati', 10)}}
OUTPUT: SUCCESS

EXAMPLE 3:
SKILL TO BE EVALUATED: Python
INTERVIEWER: What will be the output of the following?
a=["1","2","3"]
b=["a","b","c"]
c=[x+y for x, y in zip(a,b)]
print(c)
INTERVIEWEE: The output is: ['1a', '2b', '3c']
OUTPUT: SUCCESS

EXAMPLE 4:
SKILL TO BE EVALUATED: Python
INTERVIEWER: What will be the output of the following?
str="apple#banana#kiwi#orange"
print(str.split("#",2))
INTERVIEWEE: ['apple', 'banana', 'kiwi#orange']
OUTPUT: SUCCESS

EXAMPLE 5:
SKILL TO BE EVALUATED: Python
INTERVIEWER: What are python modules? Name some commonly used built-in modules in Python?
INTERVIEWEE: Python modules are files containing Python code. This code can either be function classes or variables. A Python module is a .py file containing executable code. Some of the commonly used built-in modules are:
- os
- sys
- math
- random
- data time
- json
OUTPUT: SUCCESS

Note that the examples that I give above have the correct answer. Your job is to generate the output only (SUCCESS OR FAIL). You don't need to explain your justification.

SKILL TO BE EVALUATED: {skill}

{transcript}
""",
            },
        ],
    }
    return model_parameters


def gpt_evaluator(payload, fewshot, response_format):
    """Score each prompt in *payload* via structured-output chat completions.

    Args:
        payload: List of user-message strings (one per competence).
        fewshot: System prompt with few-shot examples and output format.
        response_format: Pydantic model the SDK parses the reply into; it is
            expected to expose a ``.value`` attribute (e.g. ``Evaluations``).

    Returns:
        List of the parsed ``.value`` payloads, one per input prompt.
    """
    res = []
    for user_message in payload:
        response = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": fewshot},
                {"role": "user", "content": user_message},
            ],
            response_format=response_format,
        )
        parsed = response.choices[0].message.parsed
        res.append(parsed.value)
    return res


def extract_competences_and_responses(competences: list[str], transcripts: list[dict]):
    """Join each competence's transcript answers into one newline-separated string.

    Args:
        competences: Competence names; only its length is used, to pair each
            competence with the transcript at the same index.
        transcripts: One list of chat dicts (each with an "answer" key) per
            competence.

    Returns:
        List of joined answer strings, index-aligned with *competences*.
    """
    responses = []
    for i in range(len(competences)):
        answers = [chat["answer"] for chat in transcripts[i]]
        responses.append("\n".join(answers))
    return responses


def evaluate_interview(competences: list[str], transcript: dict, lang: str = 'en'):
    """Evaluate behavioral and technical interview sections and aggregate a score.

    Args:
        competences: Competence names (currently informational only; the
            competences actually evaluated come from ``transcript["comp_beha"]``).
        transcript: Dict with keys "comp_beha", "behavioral", "comp_tech",
            "technical" holding competences and their chat transcripts.
            NOTE(review): annotation was ``list`` but the function indexes it
            by string keys — it is a dict.
        lang: 'en' for the English few-shot prompt, anything else → Indonesian.

    Returns:
        EvalResult with the aggregated final score and per-competence details.
    """
    responses = extract_competences_and_responses(
        transcript["comp_beha"], transcript["behavioral"]
    )

    # One model input per behavioral competence. The "KNOWLEDGE:" header is
    # kept for prompt-format compatibility even though nothing follows it.
    model_inputs = []
    for competence, response in zip(transcript["comp_beha"], responses):
        text = "KNOWLEDGE:\n"
        text += f"\nCOMPETENCE: {competence}\n\n"
        text += f"RESPONSE:\n{response}"
        model_inputs.append(text)

    fewshot = _FEWSHOT_EN if lang == 'en' else _FEWSHOT_ID
    result = gpt_evaluator(model_inputs, fewshot, Evaluations)

    behavioral_scores = generate_behavioral_score(result)
    technical_scores = generate_technical_score(
        transcript["comp_tech"], transcript["technical"]
    )
    final_score = aggregate_scores(behavioral_scores, technical_scores)
    return EvalResult(final_score=final_score, details=result)


def aggregate_scores(b: list[int], t: list[int]) -> float:
    """Return the mean of all behavioral and technical scores, scaled to 0-100.

    Args:
        b: Behavioral scores (each expected in [0, 1]).
        t: Technical scores (each expected in [0, 1]).

    Returns:
        Mean of ``b + t`` times 100, or 0.0 when both lists are empty.
    """
    alls = b + t
    if not alls:
        return 0.0  # avoid ZeroDivisionError on empty input
    # BUG FIX: previously divided by len(b) only, which inflated the average
    # whenever technical scores were present.
    return (sum(alls) / len(alls)) * 100


def generate_behavioral_score(eval_array):
    """Extract the numeric ``.score`` from each behavioral evaluation object."""
    return [evaluation.score for evaluation in eval_array]


def generate_technical_score(skills: str, transcript: str):
    """Grade each technical skill's first Q&A as pass (1) / fail (0).

    Args:
        skills: Iterable of skill names, index-aligned with *transcript*.
        transcript: Per-skill list of chat dicts with "question" and "answer"
            keys. NOTE(review): annotations say ``str`` but both are iterated
            as sequences — presumably lists; confirm against callers.

    Returns:
        List of per-skill scores: 1 on "SUCCESS", 0 on "FAIL",
        -1 when no chat was recorded for the skill.
    """
    scores = []
    for idx, skill in enumerate(skills):
        chat = transcript[idx]
        if not chat:
            scores.append(-1)  # sentinel: no answer recorded for this skill
            continue
        # BUG FIX: lstrip('TECHNICAL: ') stripped a *character class*, eating
        # leading letters of questions starting with T/E/C/H/N/I/A/L; use
        # removeprefix to drop only the literal prefix.
        question = chat[0]['question'].removeprefix('TECHNICAL: ')
        answer = chat[0]['answer']
        # BUG FIX: labels were swapped (question tagged INTERVIEWEE and answer
        # tagged INTERVIEWER), contradicting the few-shot examples in the
        # system prompt.
        transcript_text = f"INTERVIEWER:\n{question}\n\nINTERVIEWEE:\n{answer}"
        # TODO: change to structured output
        model_parameters = generate_model_parameters(skill, transcript_text)
        completion = client.chat.completions.create(**model_parameters)
        generated = completion.choices[0].message.content
        scores.append(1 if "SUCCESS" in generated else 0)
    return scores