rubenml committed on
Commit
4448766
·
verified ·
1 Parent(s): f4c9709

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -34
app.py CHANGED
# --- Basic Agent Definition ---
class GeneralAgent:
    """GPT-2 based question-answering agent.

    Generates free-form text from a structured prompt and returns the portion
    following the "FINAL ANSWER:" marker, formatted as "FINAL ANSWER: ...".
    """

    def __init__(self):
        # Loads the base GPT-2 checkpoint (not fine-tuned for QA) plus its tokenizer.
        print("Initializing GPT-2 based QA agent...")
        self.model = GPT2LMHeadModel.from_pretrained("gpt2")
        self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

    def __call__(self, question: str, context: str = None) -> str:
        """Answer *question* using *context*.

        Args:
            question: The question to answer.
            context: Optional supporting text; a placeholder is used when None.

        Returns:
            A string of the form "FINAL ANSWER: <answer>".
        """
        if context is None:
            context = "No context provided."  # Default context if none is given

        prompt = f"""
        You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
        YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings. If you are asked for a number, don't use commas to write your number nor use units such as $ or percent sign unless specified otherwise.
        If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
        If you are asked for a comma-separated list, apply the above rules depending on whether the element to be put in the list is a number or a string.
        Question: {question}
        Context: {context}
        """

        inputs = self.tokenizer.encode(prompt, return_tensors="pt")

        # BUG FIX: use max_new_tokens instead of max_length=500 — max_length
        # counts the prompt tokens, so a long prompt left little or no budget
        # for the answer.  early_stopping was dropped (only meaningful with
        # beam search) and pad_token_id set to silence the generation warning.
        outputs = self.model.generate(
            inputs,
            max_new_tokens=200,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            pad_token_id=self.tokenizer.eos_token_id,
        )

        # BUG FIX: decode only the generated continuation.  The prompt itself
        # contains the literal text "FINAL ANSWER:", so searching the full
        # decoded sequence always matched the prompt's template instead of
        # the model's actual output.
        generated_tokens = outputs[0][inputs.shape[1]:]
        answer = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)

        # Extract the relevant part of the answer
        final_answer = self._extract_final_answer(answer)

        return f"FINAL ANSWER: {final_answer}"

    def _extract_final_answer(self, answer: str) -> str:
        """Return the text after the last "FINAL ANSWER:" marker in *answer*.

        Uses rfind so that, even if the marker appears more than once, the
        final (most recent) occurrence wins.  Returns an error string when
        the marker is absent.
        """
        final_answer_start = "FINAL ANSWER:"
        start_idx = answer.rfind(final_answer_start)

        if start_idx == -1:
            return "Error processing question."

        final_answer = answer[start_idx + len(final_answer_start):].strip()
        return final_answer
# --- Basic Agent Definition ---
class GeneralAgent:
    """Extractive QA agent built on BERT fine-tuned on SQuAD."""

    def __init__(self):
        print("Initializing BERT-based QA agent...")
        # Pretrained BERT (whole-word-masking, SQuAD-finetuned) wrapped in a
        # question-answering pipeline for extractive QA.
        self.qa_pipeline = pipeline(
            "question-answering",
            model="bert-large-uncased-whole-word-masking-finetuned-squad",
        )

    def __call__(self, question: str, context: str = None) -> str:
        """Answer *question* from *context*.

        Args:
            question: The question to answer.
            context: The passage to extract the answer from; when None an
                error message is returned instead of running the pipeline.

        Returns:
            A string of the form "FINAL ANSWER: <answer>".
        """
        if context is None:
            return "FINAL ANSWER: No context provided."

        try:
            # BUG FIX: pass the raw context, not an instruction-wrapped
            # prompt.  An extractive QA pipeline selects a span FROM the
            # context text, so embedding instructions and an "Answer:" label
            # in it let the model pick spans from the boilerplate rather
            # than from the actual supporting passage.
            result = self.qa_pipeline(question=question, context=context)
            answer = result["answer"]
        except Exception as e:
            # Best-effort: log and fall back to an error answer rather than
            # crashing the caller.
            print(f"Error durante QA: {e}")
            answer = "Error processing question."

        # Return the final answer in the required format
        return f"FINAL ANSWER: {answer}"