Interview Warmup — Hugging Face Space (status: Sleeping)
import json
import os
import tempfile

import gradio as gr
import openai
from dotenv import load_dotenv
from gtts import gTTS

# Load variables from a local .env file into the environment (no-op if missing).
load_dotenv()

# Default OpenAI API key; the UI also allows supplying one per session.
openai.api_key = os.getenv("OPENAI_API_KEY")
# -----------------------------
# Generate 20 interview questions for the vacancy, in the selected language
# -----------------------------
def _parse_question_list(result_text):
    """Parse model output into a list of question strings.

    Tries JSON first (the prompt asks for a JSON list); falls back to
    splitting on newlines when the payload is not valid JSON or not a list.
    """
    # Chat models frequently wrap JSON in markdown code fences; strip them.
    cleaned = result_text.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        if cleaned.lower().startswith("json"):
            cleaned = cleaned[4:]
    try:
        questions = json.loads(cleaned)
        if isinstance(questions, list):
            return [str(q).strip() for q in questions if str(q).strip()]
    except (json.JSONDecodeError, ValueError):
        pass
    # Fallback: treat each non-empty line as one question.
    return [q.strip() for q in result_text.split("\n") if q.strip()]


def generate_interview_questions(job_title, job_desc, language):
    """Ask the chat model for 20 interview questions for the vacancy.

    Args:
        job_title: Title of the vacancy.
        job_desc: Free-text description of the vacancy.
        language: "English" for English prompts, anything else → Spanish.

    Returns:
        A list of question strings (possibly parsed from a line-based
        fallback if the model does not return valid JSON).
    """
    if language.lower() == "english":
        prompt = (
            f"Generate 20 interview questions for the job vacancy '{job_title}' with the following description: {job_desc}. "
            "The questions should range from basic to advanced. "
            "Return them in JSON format as a list of strings."
        )
    else:
        prompt = (
            f"Genera 20 preguntas de entrevista para la vacante '{job_title}' con la siguiente descripción: {job_desc}. "
            "Las preguntas deben ir de las más básicas a las más elaboradas. "
            "Devuélvelas en formato JSON como una lista de strings."
        )
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
    )
    return _parse_question_list(response.choices[0].message.content)
# -----------------------------
# Text-to-speech (TTS) via gTTS, language-aware
# -----------------------------
def text_to_speech(text, language):
    """Render `text` to an MP3 file with gTTS and return the file path.

    Args:
        text: Text to synthesize; must not be empty.
        language: "English" → English voice, anything else → Spanish.

    Returns:
        Path to a temporary .mp3 file (not auto-deleted; caller owns it).

    Raises:
        ValueError: if `text` is empty or whitespace-only.
    """
    if not text.strip():
        raise ValueError("El texto proporcionado para TTS está vacío.")
    lang = "en" if language.lower() == "english" else "es"
    tts = gTTS(text, lang=lang)
    # Create the temp file, then close our handle BEFORE gTTS writes to the
    # same path: keeping it open leaks a file descriptor and fails on Windows,
    # where a file cannot be reopened for writing while a handle is held.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    temp_file.close()
    tts.save(temp_file.name)
    return temp_file.name
# -----------------------------
# Speech-to-text (STT) using OpenAI Whisper (v1 client interface)
# -----------------------------
def transcribe_audio(audio_file):
    """Transcribe the recording at `audio_file` and return the raw text."""
    with open(audio_file, "rb") as audio_handle:
        transcription = openai.audio.transcriptions.create(
            file=audio_handle,
            model="whisper-1",
        )
    # The v1 client returns a response object; the transcript is in `.text`.
    return transcription.text
# -----------------------------
# Evaluate the candidate's answer, language-aware
# -----------------------------
def evaluate_answer(question, candidate_answer, job_desc, language):
    """Score a candidate answer against the job description via the chat model.

    Args:
        question: The interview question that was asked.
        candidate_answer: Transcribed candidate answer.
        job_desc: Description of the vacancy.
        language: "English" for English prompts, anything else → Spanish.

    Returns:
        (score, comment): numeric score 0–100 and a brief feedback comment.
        Falls back to (0, error message) when the model output is unusable.
    """
    if language.lower() == "english":
        prompt = (
            f"Given the job description:\n{job_desc}\n\n"
            f"Interview question:\n{question}\n\n"
            f"Candidate's answer:\n{candidate_answer}\n\n"
            "Evaluate the candidate's answer in terms of suitability for the position. "
            "Provide a score from 0 to 100 and a brief comment on strengths and areas for improvement. "
            "Return the result in JSON format with keys 'score' and 'comment'."
        )
    else:
        prompt = (
            f"Dada la descripción de la vacante:\n{job_desc}\n\n"
            f"Pregunta de la entrevista:\n{question}\n\n"
            f"Respuesta del candidato:\n{candidate_answer}\n\n"
            "Evalúa la respuesta del candidato en términos de adecuación para el puesto. "
            "Proporciona un puntaje de 0 a 100 y un breve comentario sobre fortalezas y puntos a mejorar. "
            "Devuelve el resultado en formato JSON con las claves 'score' y 'comment'."
        )
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0
    )
    result_text = response.choices[0].message.content
    # Strip markdown code fences the model may wrap around the JSON payload.
    cleaned = result_text.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        if cleaned.lower().startswith("json"):
            cleaned = cleaned[4:]
    try:
        result_json = json.loads(cleaned)
        # Coerce to float: the model may return "85" as a string, and callers
        # later sum() the scores, which would crash on str values.
        score = float(result_json.get("score", 0))
        comment = result_json.get("comment", "")
        return score, comment
    except Exception:
        return 0, "No se pudo evaluar la respuesta correctamente."
# -----------------------------
# Generate a congratulations image, language-aware
# -----------------------------
def generate_congratulations_image(language):
    """Generate a congratulatory image via the OpenAI images API.

    Args:
        language: "English" for an English prompt, anything else → Spanish.

    Returns:
        URL of the generated image.
    """
    if language.lower() == "english":
        prompt = "A cheerful and professional congratulatory image for a successful job interview candidate."
    else:
        prompt = "Una imagen de felicitación alegre y profesional para un candidato exitoso en una entrevista de trabajo."
    # The v1 client (used throughout this file) exposes image generation as
    # `openai.images.generate`; `openai.image_generations` does not exist.
    response = openai.images.generate(
        prompt=prompt,
        n=1,
        size="512x512"
    )
    # v1 responses are attribute-accessed objects, not dicts.
    image_url = response.data[0].url
    return image_url
# -----------------------------
# Global interview-session state shared by all Gradio callbacks
# -----------------------------
state = {
    "questions": [],      # generated interview questions for this session
    "current_index": 0,   # index of the question currently being asked
    "scores": [],         # numeric score per answered question
    "evaluations": [],    # feedback comment per answered question
    "job_desc": "",       # vacancy description entered by the user
    "job_title": "",      # vacancy title entered by the user
    "language": "es",     # interview language; defaults to Spanish
}
# -----------------------------
# Start a new interview session
# -----------------------------
def start_interview(job_title, job_desc, language, api_key):
    """Reset the session state, generate the questions and return the first.

    Returns a 4-tuple matching the Gradio outputs:
    (question_text, question_audio_path, progress_label, feedback_text).
    """
    # A key typed into the UI takes precedence over the .env default.
    if api_key and api_key.strip():
        openai.api_key = api_key.strip()

    state["job_title"] = job_title
    state["job_desc"] = job_desc
    state["language"] = language
    state["questions"] = generate_interview_questions(job_title, job_desc, language)
    state["current_index"] = 0
    state["scores"] = []
    state["evaluations"] = []

    total = len(state["questions"])
    if total == 0:
        return "No se generaron preguntas.", None, "0 preguntas", ""

    first_question = state["questions"][state["current_index"]]
    # Guard against an empty first question before attempting TTS.
    if not first_question.strip():
        return "La pregunta generada está vacía.", None, "0 preguntas", ""

    audio_path = text_to_speech(first_question, language)
    return first_question, audio_path, f"Pregunta 1 de {total}", ""
# -----------------------------
# Transcribe and grade the candidate's recorded answer
# -----------------------------
def submit_answer(audio_file):
    """Transcribe the recording, score it, record it, and return a summary."""
    answer_text = transcribe_audio(audio_file)
    current_question = state["questions"][state["current_index"]]
    score, comment = evaluate_answer(
        current_question, answer_text, state["job_desc"], state["language"]
    )
    # Accumulate per-question results for the final average / feedback.
    state["scores"].append(score)
    state["evaluations"].append(comment)
    return (
        f"Respuesta transcrita: {answer_text}\n"
        f"Puntuación: {score}\nComentario: {comment}"
    )
# -----------------------------
# Advance to the next question, or wrap up the interview
# -----------------------------
def next_question():
    """Move to the next question; when exhausted, produce the final summary.

    Returns a 6-tuple matching the Gradio outputs:
    (question_text, question_audio, progress_label,
     final_image_url, final_message_text, final_audio).
    """
    total_questions = len(state["questions"])
    state["current_index"] += 1

    # Questions remain: present and read out the next one.
    if state["current_index"] < total_questions:
        question = state["questions"][state["current_index"]]
        audio_file = text_to_speech(question, state["language"])
        progress = f"Pregunta {state['current_index']+1} de {total_questions}"
        return question, audio_file, progress, None, "", None

    # Interview finished: average score is 0 when nothing was answered.
    average_score = sum(state["scores"]) / len(state["scores"]) if state["scores"] else 0
    english = state["language"].lower() == "english"

    # Build concrete feedback from the stored per-answer comments instead of
    # reading a literal template placeholder to the candidate.
    feedback_points = "; ".join(c for c in state["evaluations"] if c)

    if average_score >= 70:
        image_url = generate_congratulations_image(state["language"])
        if english:
            final_message_text = f"Congratulations! You scored an average of {average_score:.2f}%. You are an outstanding candidate!"
        else:
            final_message_text = f"¡Felicidades! Has obtenido un promedio de {average_score:.2f}%. ¡Eres un candidato destacado!"
        final_audio = text_to_speech(final_message_text, state["language"])
        return "", None, "Entrevista completa", image_url, final_message_text, final_audio

    if english:
        final_message_text = (
            f"Your average score is {average_score:.2f}%. "
            f"Please work on the following points for improvement: {feedback_points or 'keep practicing.'}"
        )
    else:
        final_message_text = (
            f"Tu promedio fue de {average_score:.2f}%. "
            f"Te recomendamos trabajar en los siguientes puntos: {feedback_points or 'sigue practicando.'}"
        )
    final_audio = text_to_speech(final_message_text, state["language"])
    return "", None, "Entrevista completa", None, final_message_text, final_audio
# -----------------------------
# Gradio interface construction
# -----------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Interview Warmup")
    gr.Markdown("Simulador de entrevista de trabajo con TTS y STT (usando la API más reciente de OpenAI)")

    # Vacancy details and session configuration.
    with gr.Row():
        job_title_input = gr.Textbox(label="Título de la Vacante / Job Title")
        job_desc_input = gr.Textbox(label="Descripción de la Vacante / Job Description", lines=4)
    language_input = gr.Radio(choices=["Español", "English"], value="Español", label="Idioma / Language")
    api_key_input = gr.Textbox(label="OpenAI API Key (Opcional)", type="password")
    start_button = gr.Button("Iniciar Entrevista / Start Interview")

    # Current question: text, synthesized audio, and progress counter.
    question_output = gr.Textbox(label="Pregunta / Question", interactive=False)
    question_audio = gr.Audio(label="Audio de la Pregunta / Question Audio", type="filepath", interactive=False)
    question_counter = gr.Textbox(label="Estado de la Entrevista / Interview Progress", interactive=False)

    # Candidate answer: evaluation display, recorder, and flow controls.
    answer_feedback = gr.Textbox(label="Evaluación de tu Respuesta / Your Answer Evaluation", interactive=False)
    candidate_audio = gr.Audio(type="filepath", label="Tu respuesta / Your Answer (Graba aquí)")
    submit_button = gr.Button("Enviar Respuesta / Submit Answer")
    next_button = gr.Button("Siguiente Pregunta / Next Question")

    # End-of-interview summary widgets.
    final_image = gr.Image(label="Imagen de Felicitación / Congratulations Image")
    final_message = gr.Textbox(label="Mensaje Final / Final Message", interactive=False)
    final_audio = gr.Audio(label="Audio Final / Final Audio", type="filepath", interactive=False)

    # Wire buttons to their callbacks.
    start_button.click(
        start_interview,
        inputs=[job_title_input, job_desc_input, language_input, api_key_input],
        outputs=[question_output, question_audio, question_counter, answer_feedback],
    )
    submit_button.click(
        submit_answer,
        inputs=[candidate_audio],
        outputs=[answer_feedback],
    )
    next_button.click(
        next_question,
        inputs=[],
        outputs=[question_output, question_audio, question_counter, final_image, final_message, final_audio],
    )

demo.launch()