Spaces:
Sleeping
Sleeping
Jose Martin Rangel Espinoza committed on
Commit ·
7cb8fd3
1
Parent(s): 0b77f23
App
Browse files- main.py +263 -0
- poetry.lock +0 -0
- pyproject.toml +24 -0
- requirements.txt +61 -0
main.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import os
import tempfile

import gradio as gr
import openai
from dotenv import load_dotenv
from gtts import gTTS

# Pull configuration from a local .env file (if present) into the environment.
load_dotenv()

# The OpenAI key comes from .env / the process environment; it can also be
# overridden at runtime through the UI field handled by start_interview.
openai.api_key = os.getenv("OPENAI_API_KEY")
|
| 14 |
+
|
| 15 |
+
# -----------------------------
# Generate 20 interview questions for the vacancy, in the selected language
# -----------------------------
def generate_interview_questions(job_title, job_desc, language):
    """Ask the chat model for 20 interview questions and return them as a list.

    Args:
        job_title: Title of the vacancy.
        job_desc: Free-text description of the vacancy.
        language: "English" (any case) selects English prompts; anything else
            selects Spanish.

    Returns:
        list[str]: Questions parsed from the model's JSON when possible,
        otherwise recovered line-by-line from the raw response text.
    """
    if language.lower() == "english":
        prompt = (
            f"Generate 20 interview questions for the job vacancy '{job_title}' with the following description: {job_desc}. "
            "The questions should range from basic to advanced. "
            "Return them in JSON format as a list of strings."
        )
    else:
        prompt = (
            f"Genera 20 preguntas de entrevista para la vacante '{job_title}' con la siguiente descripción: {job_desc}. "
            "Las preguntas deben ir de las más básicas a las más elaboradas. "
            "Devuélvelas en formato JSON como una lista de strings."
        )

    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7
    )

    result_text = response.choices[0].message.content
    return _parse_questions(result_text)


def _parse_questions(result_text):
    """Parse model output into a list of question strings.

    Handles the common case where the model wraps its JSON in a ``` / ```json
    markdown fence (plain json.loads would fail on that); falls back to
    splitting on newlines when the payload is not valid JSON.
    """
    cleaned = result_text.strip()
    if cleaned.startswith("```"):
        # Remove the fence backticks, then drop an optional language tag line
        # such as "json" that may precede the actual payload.
        cleaned = cleaned.strip("`").strip()
        first_newline = cleaned.find("\n")
        if first_newline != -1 and cleaned[:first_newline].strip().lower() in ("json", ""):
            cleaned = cleaned[first_newline + 1:]
    try:
        questions = json.loads(cleaned)
        if isinstance(questions, list):
            return [str(q) for q in questions]
    except (json.JSONDecodeError, ValueError):
        pass
    # Fallback: treat every non-empty line of the raw response as a question.
    return [q.strip() for q in result_text.split("\n") if q.strip()]
|
| 47 |
+
|
| 48 |
+
# -----------------------------
# Convert text to speech (TTS) using gTTS, in the selected language
# -----------------------------
def text_to_speech(text, language):
    """Synthesize `text` to an MP3 file and return the file path.

    Args:
        text: Text to speak; must be non-empty.
        language: "English" (any case) uses the English voice, otherwise Spanish.

    Returns:
        str: Path to a temporary .mp3 file (not auto-deleted; caller cleans up).

    Raises:
        ValueError: If `text` is empty or whitespace-only.
    """
    if not text.strip():
        raise ValueError("El texto proporcionado para TTS está vacío.")
    lang = "en" if language.lower() == "english" else "es"
    tts = gTTS(text, lang=lang)
    # Reserve a temp path, then close our handle BEFORE gTTS writes to it:
    # keeping it open leaks the descriptor and prevents the write on Windows.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    temp_file.close()
    tts.save(temp_file.name)
    return temp_file.name
|
| 60 |
+
|
| 61 |
+
# -----------------------------
# Transcribe audio (STT) through the current Whisper client interface
# -----------------------------
def transcribe_audio(audio_file):
    """Transcribe the recording at `audio_file` and return its text.

    Sends the file to OpenAI's whisper-1 model; the modern client returns a
    typed object whose transcript lives on the `.text` attribute.
    """
    with open(audio_file, "rb") as recording:
        transcription = openai.audio.transcriptions.create(model="whisper-1", file=recording)
    return transcription.text
|
| 72 |
+
|
| 73 |
+
# -----------------------------
# Evaluate the candidate's answer, in the selected language
# -----------------------------
def evaluate_answer(question, candidate_answer, job_desc, language):
    """Score a candidate's answer against the job description via the chat model.

    Args:
        question: The interview question that was asked.
        candidate_answer: The candidate's (transcribed) answer.
        job_desc: The vacancy description used as evaluation context.
        language: "English" (any case) selects English prompts; otherwise Spanish.

    Returns:
        tuple[float | int, str]: (score 0-100, evaluation comment). Returns
        (0, fallback message) when the model's reply cannot be parsed.
    """
    if language.lower() == "english":
        prompt = (
            f"Given the job description:\n{job_desc}\n\n"
            f"Interview question:\n{question}\n\n"
            f"Candidate's answer:\n{candidate_answer}\n\n"
            "Evaluate the candidate's answer in terms of suitability for the position. "
            "Provide a score from 0 to 100 and a brief comment on strengths and areas for improvement. "
            "Return the result in JSON format with keys 'score' and 'comment'."
        )
    else:
        prompt = (
            f"Dada la descripción de la vacante:\n{job_desc}\n\n"
            f"Pregunta de la entrevista:\n{question}\n\n"
            f"Respuesta del candidato:\n{candidate_answer}\n\n"
            "Evalúa la respuesta del candidato en términos de adecuación para el puesto. "
            "Proporciona un puntaje de 0 a 100 y un breve comentario sobre fortalezas y puntos a mejorar. "
            "Devuelve el resultado en formato JSON con las claves 'score' y 'comment'."
        )

    # temperature=0 keeps the evaluation as deterministic as the model allows.
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0
    )

    result_text = response.choices[0].message.content
    try:
        # Strip a markdown code fence (``` or ```json) if the model added one,
        # otherwise json.loads would reject an otherwise-valid payload.
        cleaned = result_text.strip()
        if cleaned.startswith("```"):
            cleaned = cleaned.strip("`").strip()
            first_newline = cleaned.find("\n")
            if first_newline != -1 and cleaned[:first_newline].strip().lower() in ("json", ""):
                cleaned = cleaned[first_newline + 1:]
        result_json = json.loads(cleaned)
        # Coerce the score: models sometimes return "85" as a string.
        score = float(result_json.get("score", 0))
        comment = result_json.get("comment", "")
        return score, comment
    except (json.JSONDecodeError, ValueError, TypeError):
        return 0, "No se pudo evaluar la respuesta correctamente."
|
| 110 |
+
|
| 111 |
+
# -----------------------------
# Generate a congratulations image, in the selected language
# -----------------------------
def generate_congratulations_image(language):
    """Generate a congratulatory image through the OpenAI Images API.

    Args:
        language: "English" (any case) uses an English prompt, otherwise Spanish.

    Returns:
        str: URL of the generated image.
    """
    if language.lower() == "english":
        prompt = "A cheerful and professional congratulatory image for a successful job interview candidate."
    else:
        prompt = "Una imagen de felicitación alegre y profesional para un candidato exitoso en una entrevista de trabajo."

    # openai>=1.x (the interface used elsewhere in this file) exposes image
    # generation as `openai.images.generate`; `openai.image_generations` does
    # not exist, and the response is a typed object, not a subscriptable dict.
    response = openai.images.generate(
        prompt=prompt,
        n=1,
        size="512x512"
    )
    return response.data[0].url
|
| 127 |
+
|
| 128 |
+
# -----------------------------
# Global state tracking the progress of the current interview
# -----------------------------
state = {
    "questions": [],      # questions generated for the current vacancy
    "current_index": 0,   # index of the question currently being asked
    "scores": [],         # one numeric score per answered question
    "evaluations": [],    # one evaluation comment per answered question
    "job_desc": "",       # vacancy description entered by the user
    "job_title": "",      # vacancy title entered by the user
    "language": "es",     # defaults to Spanish
}
|
| 140 |
+
|
| 141 |
+
# -----------------------------
# Start the interview
# -----------------------------
def start_interview(job_title, job_desc, language, api_key):
    """Reset the interview state, generate the questions, and return the first.

    Returns the 4-tuple the UI expects: (question text, question audio path,
    progress label, feedback text).
    """
    # An API key typed into the UI takes precedence over the .env one.
    key = (api_key or "").strip()
    if key:
        openai.api_key = key

    state.update(
        job_title=job_title,
        job_desc=job_desc,
        language=language,
        questions=generate_interview_questions(job_title, job_desc, language),
        current_index=0,
        scores=[],
        evaluations=[],
    )

    if not state["questions"]:
        return "No se generaron preguntas.", None, "0 preguntas", ""

    question = state["questions"][0]
    if not question.strip():
        return "La pregunta generada está vacía.", None, "0 preguntas", ""

    audio_path = text_to_speech(question, language)
    return question, audio_path, f"Pregunta 1 de {len(state['questions'])}", ""
|
| 166 |
+
|
| 167 |
+
# -----------------------------
# Submit and evaluate the candidate's answer
# -----------------------------
def submit_answer(audio_file):
    """Transcribe the candidate's recorded answer, score it, and report back.

    Appends the score and comment to the global state and returns a
    human-readable summary string for the UI.
    """
    candidate_answer = transcribe_audio(audio_file)
    question = state["questions"][state["current_index"]]
    score, comment = evaluate_answer(
        question, candidate_answer, state["job_desc"], state["language"]
    )
    state["scores"].append(score)
    state["evaluations"].append(comment)
    return (
        f"Respuesta transcrita: {candidate_answer}\n"
        f"Puntuación: {score}\nComentario: {comment}"
    )
|
| 182 |
+
|
| 183 |
+
# -----------------------------
# Advance to the next question or finish the interview
# -----------------------------
def next_question():
    """Advance the interview, returning either the next question or the wrap-up.

    Returns the 6-tuple the UI expects: (question text, question audio,
    progress label, final image URL, final message, final audio).
    """
    total = len(state["questions"])
    state["current_index"] += 1
    idx = state["current_index"]

    # Questions remaining: speak and show the next one.
    if idx < total:
        question = state["questions"][idx]
        audio_path = text_to_speech(question, state["language"])
        return question, audio_path, f"Pregunta {idx+1} de {total}", None, "", None

    # Interview finished: build the closing message from the average score.
    scores = state["scores"]
    average_score = sum(scores) / len(scores) if scores else 0
    english = state["language"].lower() == "english"

    if average_score >= 70:
        # High average: congratulate with an image plus a spoken message.
        image_url = generate_congratulations_image(state["language"])
        if english:
            final_message_text = f"Congratulations! You scored an average of {average_score:.2f}%. You are an outstanding candidate!"
        else:
            final_message_text = f"¡Felicidades! Has obtenido un promedio de {average_score:.2f}%. ¡Eres un candidato destacado!"
        final_audio = text_to_speech(final_message_text, state["language"])
        return "", None, "Entrevista completa", image_url, final_message_text, final_audio

    # Low average: feedback message only, no image.
    if english:
        final_message_text = (
            f"Your average score is {average_score:.2f}%. Please work on the following points for improvement: "
            "[Adjust the feedback points as needed]."
        )
    else:
        final_message_text = (
            f"Tu promedio fue de {average_score:.2f}%. Te recomendamos trabajar en los siguientes puntos: "
            "[Ajusta aquí los puntos a mejorar según la evaluación]."
        )
    final_audio = text_to_speech(final_message_text, state["language"])
    return "", None, "Entrevista completa", None, final_message_text, final_audio
|
| 217 |
+
|
| 218 |
+
# -----------------------------
# Gradio interface
# -----------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Interview Warmup")
    gr.Markdown("Simulador de entrevista de trabajo con TTS y STT (usando la API más reciente de OpenAI)")

    # Vacancy details, language choice, and optional API-key override.
    with gr.Row():
        job_title_input = gr.Textbox(label="Título de la Vacante / Job Title")
        job_desc_input = gr.Textbox(label="Descripción de la Vacante / Job Description", lines=4)
        language_input = gr.Radio(choices=["Español", "English"], value="Español", label="Idioma / Language")
        api_key_input = gr.Textbox(label="OpenAI API Key (Opcional)", type="password")

    start_button = gr.Button("Iniciar Entrevista / Start Interview")

    # Current-question display, progress, and per-answer feedback.
    question_output = gr.Textbox(label="Pregunta / Question", interactive=False)
    question_audio = gr.Audio(label="Audio de la Pregunta / Question Audio", type="filepath", interactive=False)
    question_counter = gr.Textbox(label="Estado de la Entrevista / Interview Progress", interactive=False)
    answer_feedback = gr.Textbox(label="Evaluación de tu Respuesta / Your Answer Evaluation", interactive=False)

    # Candidate answer recording and navigation controls.
    candidate_audio = gr.Audio(type="filepath", label="Tu respuesta / Your Answer (Graba aquí)")
    submit_button = gr.Button("Enviar Respuesta / Submit Answer")
    next_button = gr.Button("Siguiente Pregunta / Next Question")

    # End-of-interview outputs.
    final_image = gr.Image(label="Imagen de Felicitación / Congratulations Image")
    final_message = gr.Textbox(label="Mensaje Final / Final Message", interactive=False)
    final_audio = gr.Audio(label="Audio Final / Final Audio", type="filepath", interactive=False)

    # Wire the buttons to the interview logic.
    start_button.click(
        start_interview,
        inputs=[job_title_input, job_desc_input, language_input, api_key_input],
        outputs=[question_output, question_audio, question_counter, answer_feedback],
    )
    submit_button.click(submit_answer, inputs=[candidate_audio], outputs=[answer_feedback])
    next_button.click(
        next_question,
        inputs=[],
        outputs=[question_output, question_audio, question_counter, final_image, final_message, final_audio],
    )

demo.launch()
|
poetry.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pyproject.toml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "interviewwarmup"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = ""
|
| 5 |
+
authors = [
|
| 6 |
+
{name = "Jose Martin Rangel Espinoza",email = "jrangelespinoza@gmail.com"}
|
| 7 |
+
]
|
| 8 |
+
readme = "README.md"
|
| 9 |
+
requires-python = ">=3.11"
|
| 10 |
+
dependencies = [
|
| 11 |
+
"gradio (>=5.20.1,<6.0.0)",
|
| 12 |
+
"openai (>=1.65.4,<2.0.0)",
|
| 13 |
+
"gtts (>=2.5.4,<3.0.0)",
|
| 14 |
+
"python-dotenv (>=1.0.1,<2.0.0)",
|
| 15 |
+
"setuptools (>=75.8.2,<76.0.0)"
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
[build-system]
|
| 20 |
+
requires = ["poetry-core>=2.0.0,<3.0.0"]
|
| 21 |
+
build-backend = "poetry.core.masonry.api"
|
| 22 |
+
|
| 23 |
+
[tool.poetry.requires-plugins]
|
| 24 |
+
poetry-plugin-export = ">=1.8"
|
requirements.txt
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiofiles==23.2.1 ; python_version >= "3.11"
|
| 2 |
+
annotated-types==0.7.0 ; python_version >= "3.11"
|
| 3 |
+
anyio==4.8.0 ; python_version >= "3.11"
|
| 4 |
+
audioop-lts==0.2.1 ; python_version >= "3.13"
|
| 5 |
+
certifi==2025.1.31 ; python_version >= "3.11"
|
| 6 |
+
charset-normalizer==3.4.1 ; python_version >= "3.11"
|
| 7 |
+
click==8.1.8 ; python_version >= "3.11"
|
| 8 |
+
colorama==0.4.6 ; python_version >= "3.11" and platform_system == "Windows"
|
| 9 |
+
distro==1.9.0 ; python_version >= "3.11"
|
| 10 |
+
fastapi==0.115.11 ; python_version >= "3.11"
|
| 11 |
+
ffmpy==0.3.2 ; python_version >= "3.12"
|
| 12 |
+
ffmpy==0.5.0 ; python_version == "3.11"
|
| 13 |
+
filelock==3.17.0 ; python_version >= "3.11"
|
| 14 |
+
fsspec==2025.3.0 ; python_version >= "3.11"
|
| 15 |
+
gradio-client==1.7.2 ; python_version >= "3.11"
|
| 16 |
+
gradio==5.20.1 ; python_version >= "3.11"
|
| 17 |
+
groovy==0.1.2 ; python_version >= "3.11"
|
| 18 |
+
gtts==2.5.4 ; python_version >= "3.11"
|
| 19 |
+
h11==0.14.0 ; python_version >= "3.11"
|
| 20 |
+
httpcore==1.0.7 ; python_version >= "3.11"
|
| 21 |
+
httpx==0.28.1 ; python_version >= "3.11"
|
| 22 |
+
huggingface-hub==0.29.2 ; python_version >= "3.11"
|
| 23 |
+
idna==3.10 ; python_version >= "3.11"
|
| 24 |
+
jinja2==3.1.6 ; python_version >= "3.11"
|
| 25 |
+
jiter==0.8.2 ; python_version >= "3.11"
|
| 26 |
+
markdown-it-py==2.2.0 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 27 |
+
markupsafe==2.1.5 ; python_version >= "3.11"
|
| 28 |
+
mdurl==0.1.2 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 29 |
+
numpy==2.2.3 ; python_version >= "3.11"
|
| 30 |
+
openai==1.65.4 ; python_version >= "3.11"
|
| 31 |
+
orjson==3.10.15 ; python_version >= "3.11"
|
| 32 |
+
packaging==24.2 ; python_version >= "3.11"
|
| 33 |
+
pandas==2.2.3 ; python_version >= "3.11"
|
| 34 |
+
pillow==11.1.0 ; python_version >= "3.11"
|
| 35 |
+
pydantic-core==2.27.2 ; python_version >= "3.11"
|
| 36 |
+
pydantic==2.10.6 ; python_version >= "3.11"
|
| 37 |
+
pydub==0.25.1 ; python_version >= "3.11"
|
| 38 |
+
pygments==2.19.1 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 39 |
+
python-dateutil==2.9.0.post0 ; python_version >= "3.11"
|
| 40 |
+
python-dotenv==1.0.1 ; python_version >= "3.11"
|
| 41 |
+
python-multipart==0.0.20 ; python_version >= "3.11"
|
| 42 |
+
pytz==2025.1 ; python_version >= "3.11"
|
| 43 |
+
pyyaml==6.0.2 ; python_version >= "3.11"
|
| 44 |
+
requests==2.32.3 ; python_version >= "3.11"
|
| 45 |
+
rich==13.9.4 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 46 |
+
ruff==0.9.10 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 47 |
+
safehttpx==0.1.6 ; python_version >= "3.11"
|
| 48 |
+
semantic-version==2.10.0 ; python_version >= "3.11"
|
| 49 |
+
setuptools==75.8.2 ; python_version >= "3.11"
|
| 50 |
+
shellingham==1.5.4 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 51 |
+
six==1.17.0 ; python_version >= "3.11"
|
| 52 |
+
sniffio==1.3.1 ; python_version >= "3.11"
|
| 53 |
+
starlette==0.46.0 ; python_version >= "3.11"
|
| 54 |
+
tomlkit==0.13.2 ; python_version >= "3.11"
|
| 55 |
+
tqdm==4.67.1 ; python_version >= "3.11"
|
| 56 |
+
typer==0.15.2 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 57 |
+
typing-extensions==4.12.2 ; python_version >= "3.11"
|
| 58 |
+
tzdata==2025.1 ; python_version >= "3.11"
|
| 59 |
+
urllib3==2.3.0 ; python_version >= "3.11"
|
| 60 |
+
uvicorn==0.34.0 ; python_version >= "3.11" and sys_platform != "emscripten"
|
| 61 |
+
websockets==15.0.1 ; python_version >= "3.11"
|