rubenml committed on
Commit
5e69db0
·
verified ·
1 Parent(s): d4dfb59

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -20
app.py CHANGED
@@ -14,36 +14,29 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
15
class BasicAgent:
    """Extractive question-answering agent backed by a BERT SQuAD pipeline."""

    def __init__(self):
        print("Initializing BERT-based QA agent...")
        # Load the BERT model fine-tuned on SQuAD for question answering.
        self.qa_pipeline = pipeline(
            "question-answering",
            model="bert-large-uncased-whole-word-masking-finetuned-squad",
        )

    def __call__(self, question: str) -> str:
        """Answer *question* by running the extractive QA pipeline.

        NOTE(review): the "context" handed to the extractive model is only the
        instruction text plus the question itself — no external document is
        supplied, so the model can merely extract spans from the instructions.
        Behavior is preserved exactly as written.
        """
        instructions = """You are a general AI assistant. I will ask you a question based on the provided context.
        Please provide the answer in a clear and concise manner."""

        # Assemble a prompt that states the task explicitly and embeds both
        # the question and the instruction block above.
        prompt = f"""
        You are a general AI assistant. I will ask you a question based on the provided context.
        Please provide the answer in a clear and concise manner.
        Question: {question}
        Context: {instructions}
        Answer:
        """

        try:
            # Extract an answer span using the prompt as the QA context.
            prediction = self.qa_pipeline(question=question, context=prompt)
            answer = prediction["answer"]
        except Exception as err:
            print(f"Error durante QA: {err}")
            answer = "Error processing question."

        return answer
48
 
49
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
15
class BasicAgent:
    """Question-answering agent that prompts a T5 text2text-generation pipeline."""

    def __init__(self):
        print("Initializing T5-based QA agent with custom prompt...")
        # Load the T5 model for free-form text-to-text generation.
        self.qa_pipeline = pipeline("text2text-generation", model="t5-large")

    def __call__(self, question: str) -> str:
        """Process the question using T5 with a structured prompt and return an answer.

        Args:
            question: The user question to answer.

        Returns:
            The stripped text generated by the model, or the fallback string
            "Error processing question." if the pipeline call raises.
        """
        # FIX: the original triple-quoted f-string handed the model a prompt
        # containing a leading newline and source-code indentation on every
        # line — incidental whitespace that is pure noise for T5. Build the
        # prompt explicitly so the model sees only the intended text.
        prompt = (
            "You are an intelligent assistant. "
            "Please answer the following question as accurately as possible:\n"
            f"Question: {question}\n"
            "Answer:"
        )

        try:
            # Generate an answer; the pipeline returns a list of dicts with
            # a "generated_text" field.
            result = self.qa_pipeline(prompt)
            answer = result[0]["generated_text"].strip()
        except Exception as e:
            # Best-effort fallback: log and return a sentinel message rather
            # than crashing the Gradio app.
            print(f"Error during QA: {e}")
            answer = "Error processing question."

        return answer
41
 
42
  def run_and_submit_all( profile: gr.OAuthProfile | None):