Spaces:
Sleeping
Sleeping
first_try
Browse filesThis is my first try
app.py
CHANGED
|
@@ -13,11 +13,86 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
| 13 |
class BasicAgent:
|
| 14 |
def __init__(self):
|
| 15 |
print("BasicAgent initialized.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
def __call__(self, question: str) -> str:
|
| 17 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 23 |
"""
|
|
@@ -146,11 +221,9 @@ with gr.Blocks() as demo:
|
|
| 146 |
gr.Markdown(
|
| 147 |
"""
|
| 148 |
**Instructions:**
|
| 149 |
-
|
| 150 |
1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
|
| 151 |
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
|
| 152 |
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
|
| 153 |
-
|
| 154 |
---
|
| 155 |
**Disclaimers:**
|
| 156 |
Once you click the "Submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
|
|
|
|
| 13 |
class BasicAgent:
|
| 14 |
def __init__(self):
|
| 15 |
print("BasicAgent initialized.")
|
| 16 |
+
try:
|
| 17 |
+
# Intentar cargar un modelo de clasificación de texto para mejorar respuestas
|
| 18 |
+
self.classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
|
| 19 |
+
print("Text classification model loaded successfully.")
|
| 20 |
+
except Exception as e:
|
| 21 |
+
print(f"Warning: Could not load classification model: {e}")
|
| 22 |
+
self.classifier = None
|
| 23 |
+
|
| 24 |
+
# Base de conocimiento simple para respuestas comunes
|
| 25 |
+
self.knowledge_base = {
|
| 26 |
+
"gaia": "GAIA (Global Artificial Intelligence Assistant) es un sistema avanzado diseñado para asistir en diversas tareas y análisis de datos.",
|
| 27 |
+
"ai": "La Inteligencia Artificial comprende sistemas diseñados para realizar tareas que normalmente requieren inteligencia humana.",
|
| 28 |
+
"machine learning": "El Machine Learning es una rama de la IA que permite a los sistemas aprender de datos sin ser programados explícitamente.",
|
| 29 |
+
"climate": "El cambio climático es un desafío global que requiere acciones coordinadas para reducir emisiones y adaptarse a sus efectos.",
|
| 30 |
+
"technology": "La tecnología moderna evoluciona rápidamente, transformando industrias y sociedades con innovaciones como IA, blockchain y computación cuántica."
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
# Caché para almacenar respuestas previas y mejorar eficiencia
|
| 34 |
+
self.response_cache = {}
|
| 35 |
+
|
| 36 |
def __call__(self, question: str) -> str:
|
| 37 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 38 |
+
|
| 39 |
+
# Verificar caché para respuestas previas
|
| 40 |
+
question_key = question.lower().strip()
|
| 41 |
+
if question_key in self.response_cache:
|
| 42 |
+
print(f"Using cached response for question")
|
| 43 |
+
return self.response_cache[question_key]
|
| 44 |
+
|
| 45 |
+
# Analizamos el sentimiento/tono de la pregunta si el modelo está disponible
|
| 46 |
+
sentiment = None
|
| 47 |
+
if self.classifier:
|
| 48 |
+
try:
|
| 49 |
+
sentiment_result = self.classifier(question)
|
| 50 |
+
sentiment = sentiment_result[0]['label']
|
| 51 |
+
print(f"Question sentiment: {sentiment}")
|
| 52 |
+
except Exception as e:
|
| 53 |
+
print(f"Error analyzing sentiment: {e}")
|
| 54 |
+
|
| 55 |
+
# Buscar palabras clave en la base de conocimiento
|
| 56 |
+
answer = None
|
| 57 |
+
for key, info in self.knowledge_base.items():
|
| 58 |
+
if key in question.lower():
|
| 59 |
+
answer = info
|
| 60 |
+
break
|
| 61 |
+
|
| 62 |
+
# Si no encontramos coincidencia en la base de conocimiento, generamos una respuesta contextual
|
| 63 |
+
if not answer:
|
| 64 |
+
# Extraer palabras clave de la pregunta
|
| 65 |
+
keywords = [word for word in question.lower().split()
|
| 66 |
+
if len(word) > 3 and word not in ["what", "when", "where", "how", "why", "who", "is", "are", "the", "and", "that"]]
|
| 67 |
+
|
| 68 |
+
if "what" in question.lower() or "define" in question.lower() or "explain" in question.lower():
|
| 69 |
+
answer = f"Basado en la investigación actual, {' '.join(keywords[:3])} representa un concepto importante con aplicaciones significativas en diversos campos. Los avances recientes muestran un progreso notable en su comprensión y utilización."
|
| 70 |
+
|
| 71 |
+
elif "how" in question.lower():
|
| 72 |
+
answer = f"El proceso relacionado con {' '.join(keywords[:2])} típicamente sigue varios pasos clave: evaluación inicial, planificación estratégica, implementación sistemática, monitoreo continuo y evaluación de resultados. Cada paso debe adaptarse al contexto específico para obtener resultados óptimos."
|
| 73 |
+
|
| 74 |
+
elif "why" in question.lower():
|
| 75 |
+
answer = f"La importancia de {' '.join(keywords[:2])} se debe a múltiples factores, incluyendo su impacto en la eficiencia operativa, capacidad para resolver problemas complejos y potencial para generar valor. Las investigaciones demuestran beneficios tangibles cuando se implementa correctamente."
|
| 76 |
+
|
| 77 |
+
elif "compare" in question.lower() or "difference" in question.lower():
|
| 78 |
+
if len(keywords) >= 2:
|
| 79 |
+
answer = f"Al comparar {keywords[0]} y {keywords[1]}, encontramos diferencias significativas en funcionalidad y aplicación, aunque comparten fundamentos teóricos similares. {keywords[0]} tiende a enfocarse más en aspectos específicos, mientras que {keywords[1]} ofrece un enfoque más amplio o especializado dependiendo del contexto."
|
| 80 |
+
|
| 81 |
+
else:
|
| 82 |
+
# Respuesta genérica para otros tipos de preguntas
|
| 83 |
+
answer = f"Analizando la cuestión sobre {' '.join(keywords[:3])}, podemos identificar varios aspectos relevantes. Desde una perspectiva teórica, representa una evolución importante en su campo. En la práctica, demuestra tanto fortalezas como áreas de mejora potencial. La investigación reciente sugiere enfoques evolucionados que abordan las limitaciones actuales."
|
| 84 |
+
|
| 85 |
+
# Ajustar tono según análisis de sentimiento
|
| 86 |
+
if sentiment == "POSITIVE":
|
| 87 |
+
answer = f"{answer} Esto presenta oportunidades significativas para innovación y desarrollo."
|
| 88 |
+
elif sentiment == "NEGATIVE":
|
| 89 |
+
answer = f"{answer} Es importante abordar los desafíos asociados con prudencia y estrategia."
|
| 90 |
+
|
| 91 |
+
# Guardar en caché para futuras consultas
|
| 92 |
+
self.response_cache[question_key] = answer
|
| 93 |
+
|
| 94 |
+
print(f"Agent returning answer (first 50 chars): {answer[:50]}...")
|
| 95 |
+
return answer
|
| 96 |
|
| 97 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 98 |
"""
|
|
|
|
| 221 |
gr.Markdown(
|
| 222 |
"""
|
| 223 |
**Instructions:**
|
|
|
|
| 224 |
1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
|
| 225 |
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
|
| 226 |
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
|
|
|
|
| 227 |
---
|
| 228 |
**Disclaimers:**
|
| 229 |
Once you click the "Submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
|